/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks (see the sketch following this comment).
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
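
/*
 * Illustrative sketch only, not part of the original file: one way a
 * softirq action can "serialize itself" as described above, using its
 * own spinlock.  MY_SOFTIRQ, my_lock and my_drain_queue() are
 * hypothetical names used purely for illustration.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	static void my_softirq_action(struct softirq_action *a)
 *	{
 *		spin_lock(&my_lock);
 *		my_drain_queue();
 *		spin_unlock(&my_lock);
 *	}
 */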

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();

	local_bh_disable();
	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	__local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);

#endif

void local_bh_enable(void)
{
	WARN_ON(irqs_disabled());
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();
	preempt_enable_no_resched();
}
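
/*
 * Illustrative sketch only, not part of the original file: the usual
 * shape of an architecture interrupt entry path that ends up calling
 * irq_exit() above.  my_arch_do_IRQ() and my_handle_irq() are
 * hypothetical names.
 *
 *	void my_arch_do_IRQ(int irq, struct pt_regs *regs)
 *	{
 *		irq_enter();
 *		my_handle_irq(irq, regs);
 *		irq_exit();
 *	}
 */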

/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

EXPORT_SYMBOL(raise_softirq_irqoff);

void fastcall raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}

EXPORT_SYMBOL(open_softirq);

/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };

void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}

static int ksoftirqd(void *__bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			return;
		}
	}
	BUG();
}
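
/*
 * Illustrative sketch only, not part of the original file: a CPU
 * hotplug notifier could use tasklet_kill_immediate() to drop a
 * per-cpu tasklet still queued on the dead CPU.  my_cpu_tasklet is a
 * hypothetical per-cpu variable.
 *
 *	case CPU_DEAD:
 *		tasklet_kill_immediate(&per_cpu(my_cpu_tasklet, cpu), cpu);
 *		break;
 */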

static void takeover_tasklets(unsigned int cpu)
{
	struct tasklet_struct **i;

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_vec, cpu).list;
	per_cpu(tasklet_vec, cpu).list = NULL;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_hi_vec, cpu).list;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
		BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();

	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, retry, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
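
/*
 * Illustrative sketch only, not part of the original file: running a
 * function on every processor and waiting for completion.
 * my_flush_local_state() is a hypothetical function; it typically runs
 * with interrupts disabled on each CPU.
 *
 *	static void my_flush_local_state(void *unused);
 *
 *	on_each_cpu(my_flush_local_state, NULL, 1, 1);
 */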