/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks (a sketch follows below).
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
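/*
 * Illustrative sketch (not part of the original file): a softirq action
 * that serializes itself with its own spinlock, as the note above
 * describes.  The names example_lock, example_queue and example_action
 * are hypothetical.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_queue);

static void example_action(struct softirq_action *a)
{
	/* Several CPUs may run this action concurrently; the handler
	   protects its shared queue itself rather than relying on any
	   global softirq serialization. */
	spin_lock(&example_lock);
	/* ... drain example_queue ... */
	spin_unlock(&example_lock);
}
#endif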
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();

	local_bh_disable();
	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			h->action(h);
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	/* Still pending after MAX_SOFTIRQ_RESTART rounds: hand off to ksoftirqd */
	if (pending)
		wakeup_softirqd();

	__local_bh_enable();
}
#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);

#endif
void local_bh_enable(void)
{
	WARN_ON(irqs_disabled());
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();
	preempt_enable_no_resched();
}
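/*
 * Illustrative sketch (not part of the original file): architecture
 * interrupt entry code is expected to bracket handler dispatch with
 * irq_enter()/irq_exit() so that the irq_exit() above gets a chance to
 * run pending softirqs.  The name example_arch_do_IRQ is hypothetical.
 */
#if 0
static void example_arch_do_IRQ(unsigned int irq)
{
	irq_enter();
	/* ... dispatch to the registered handlers for this irq ... */
	irq_exit();	/* runs softirqs if this was the outermost
			   interrupt and something is pending */
}
#endif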
/*
 * This function must run with irqs disabled!
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

EXPORT_SYMBOL(raise_softirq_irqoff);
void fastcall raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}

EXPORT_SYMBOL(open_softirq);
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);
void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Could not run it now: requeue and re-raise the softirq */
		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Could not run it now: requeue and re-raise the softirq */
		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}
static int ksoftirqd(void * __bind_cpu)
{
	set_user_nice(current, 19);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
		if (*i == t) {
			/* unlink the tasklet from the dead CPU's list */
			*i = t->next;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	struct tasklet_struct **i;

	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_vec, cpu).list;
	per_cpu(tasklet_vec, cpu).list = NULL;
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
	*i = per_cpu(tasklet_hi_vec, cpu).list;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
		BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __devinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}