/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

void __local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());

	/*
	 * softirqs should never be enabled by __local_bh_enable(),
	 * it always nests inside local_bh_enable() sections:
	 */
	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

	sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

void local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());
#endif
	WARN_ON_ONCE(irqs_disabled());

#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable_ip);

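/*
 * Usage sketch (illustrative only, not part of this file): process-context
 * code that touches data also used from softirq context typically brackets
 * the access with local_bh_disable()/local_bh_enable(), which is also what
 * spin_lock_bh()/spin_unlock_bh() do internally.  my_counter below is a
 * hypothetical per-CPU variable:
 *
 *	local_bh_disable();			// no softirq can run on this CPU
 *	__get_cpu_var(my_counter)++;		// safe vs. the softirq handler
 *	local_bh_enable();			// runs pending softirqs, if any
 */
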
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
#ifdef CONFIG_NO_HZ
	int cpu = smp_processor_id();
	if (idle_cpu(cpu) && !in_interrupt())
		tick_nohz_stop_idle(cpu);
#endif
	__irq_enter();
#ifdef CONFIG_NO_HZ
	if (idle_cpu(cpu))
		tick_nohz_update_jiffies();
#endif
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
		tick_nohz_stop_sched_tick();
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}

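/*
 * Usage sketch (illustrative only, not part of this file): a subsystem wires
 * up its handler once at init time with open_softirq() and later requests
 * execution with raise_softirq()/raise_softirq_irqoff().  The handler name
 * my_softirq_action and the choice of SCHED_SOFTIRQ here are hypothetical:
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		// runs per CPU in softirq context, with irqs enabled
 *	}
 *
 *	open_softirq(SCHED_SOFTIRQ, my_softirq_action, NULL);
 *	...
 *	raise_softirq(SCHED_SOFTIRQ);	// marks it pending on the local CPU
 */
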
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

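/*
 * Usage sketch (illustrative only, not part of this file): drivers usually
 * set a tasklet up once and then schedule it from their interrupt handler.
 * The names my_tasklet_func and my_dev below are hypothetical:
 *
 *	static void my_tasklet_func(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// deferred work; softirq context, serialized wrt itself
 *	}
 *
 *	tasklet_init(&dev->my_tasklet, my_tasklet_func, (unsigned long)dev);
 *	...
 *	tasklet_schedule(&dev->my_tasklet);	// e.g. from the hard irq handler
 */
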
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

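/*
 * Usage sketch (illustrative only, not part of this file): on teardown, a
 * driver first makes sure nothing can re-schedule the tasklet, then waits
 * for any queued or running instance to finish.  my_dev is hypothetical:
 *
 *	free_irq(dev->irq, dev);		// stop new scheduling first
 *	tasklet_kill(&dev->my_tasklet);		// may sleep; not from irq context
 */
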
void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	struct tasklet_struct **i;

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_vec, cpu).list;
	per_cpu(tasklet_vec, cpu).list = NULL;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_hi_vec, cpu).list;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, retry, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
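
/*
 * Usage sketch (illustrative only, not part of this file): on_each_cpu()
 * runs the callback on every online CPU, including the caller's, and can
 * wait for completion.  The callback my_flush below is hypothetical; it
 * must be safe to run with interrupts disabled:
 *
 *	static void my_flush(void *info)
 *	{
 *		// per-CPU flush work
 *	}
 *
 *	on_each_cpu(my_flush, NULL, 0, 1);	// retry = 0, wait = 1
 */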