/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
9 #include <linux/kernel_stat.h>
10 #include <linux/interrupt.h>
11 #include <linux/notifier.h>
12 #include <linux/percpu.h>
13 #include <linux/init.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear, will it result in better locality
     or will not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
34 irq_cpustat_t irq_stat
[NR_CPUS
];
36 static struct softirq_action softirq_vec
[32] __cacheline_aligned_in_smp
;
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so lets the scheduler to balance
 * the softirq load for us.
 */
44 static inline void wakeup_softirqd(unsigned cpu
)
46 struct task_struct
* tsk
= ksoftirqd_task(cpu
);
48 if (tsk
&& tsk
->state
!= TASK_RUNNING
)
52 asmlinkage
void do_softirq()
62 local_irq_save(flags
);
63 cpu
= smp_processor_id();
65 pending
= softirq_pending(cpu
);
68 struct softirq_action
*h
;
73 /* Reset the pending bitmask before enabling irqs */
74 softirq_pending(cpu
) = 0;
89 pending
= softirq_pending(cpu
);
100 local_irq_restore(flags
);
/*
 * Mark softirq @nr pending on @cpu.
 * This function must run with irqs disabled!
 */
inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
	__cpu_raise_softirq(cpu, nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd(cpu);
}
/* Mark softirq @nr pending on the local CPU; callable with irqs enabled
 * (wraps cpu_raise_softirq() in an irq-save section). */
void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_raise_softirq(smp_processor_id(), nr);
	local_irq_restore(flags);
}
132 void open_softirq(int nr
, void (*action
)(struct softirq_action
*), void *data
)
134 softirq_vec
[nr
].data
= data
;
135 softirq_vec
[nr
].action
= action
;
142 struct tasklet_struct
*list
;
145 /* Some compilers disobey section attribute on statics when not
147 static DEFINE_PER_CPU(struct tasklet_head
, tasklet_vec
) = { NULL
};
148 static DEFINE_PER_CPU(struct tasklet_head
, tasklet_hi_vec
) = { NULL
};
150 void __tasklet_schedule(struct tasklet_struct
*t
)
154 local_irq_save(flags
);
155 t
->next
= __get_cpu_var(tasklet_vec
).list
;
156 __get_cpu_var(tasklet_vec
).list
= t
;
157 cpu_raise_softirq(smp_processor_id(), TASKLET_SOFTIRQ
);
158 local_irq_restore(flags
);
161 void __tasklet_hi_schedule(struct tasklet_struct
*t
)
165 local_irq_save(flags
);
166 t
->next
= __get_cpu_var(tasklet_hi_vec
).list
;
167 __get_cpu_var(tasklet_hi_vec
).list
= t
;
168 cpu_raise_softirq(smp_processor_id(), HI_SOFTIRQ
);
169 local_irq_restore(flags
);
172 static void tasklet_action(struct softirq_action
*a
)
174 struct tasklet_struct
*list
;
177 list
= __get_cpu_var(tasklet_vec
).list
;
178 __get_cpu_var(tasklet_vec
).list
= NULL
;
182 struct tasklet_struct
*t
= list
;
186 if (tasklet_trylock(t
)) {
187 if (!atomic_read(&t
->count
)) {
188 if (!test_and_clear_bit(TASKLET_STATE_SCHED
, &t
->state
))
198 t
->next
= __get_cpu_var(tasklet_vec
).list
;
199 __get_cpu_var(tasklet_vec
).list
= t
;
200 __cpu_raise_softirq(smp_processor_id(), TASKLET_SOFTIRQ
);
205 static void tasklet_hi_action(struct softirq_action
*a
)
207 struct tasklet_struct
*list
;
210 list
= __get_cpu_var(tasklet_hi_vec
).list
;
211 __get_cpu_var(tasklet_hi_vec
).list
= NULL
;
215 struct tasklet_struct
*t
= list
;
219 if (tasklet_trylock(t
)) {
220 if (!atomic_read(&t
->count
)) {
221 if (!test_and_clear_bit(TASKLET_STATE_SCHED
, &t
->state
))
231 t
->next
= __get_cpu_var(tasklet_hi_vec
).list
;
232 __get_cpu_var(tasklet_hi_vec
).list
= t
;
233 __cpu_raise_softirq(smp_processor_id(), HI_SOFTIRQ
);
239 void tasklet_init(struct tasklet_struct
*t
,
240 void (*func
)(unsigned long), unsigned long data
)
244 atomic_set(&t
->count
, 0);
249 void tasklet_kill(struct tasklet_struct
*t
)
252 printk("Attempt to kill tasklet from interrupt\n");
254 while (test_and_set_bit(TASKLET_STATE_SCHED
, &t
->state
)) {
257 while (test_bit(TASKLET_STATE_SCHED
, &t
->state
));
259 tasklet_unlock_wait(t
);
260 clear_bit(TASKLET_STATE_SCHED
, &t
->state
);
263 void __init
softirq_init()
265 open_softirq(TASKLET_SOFTIRQ
, tasklet_action
, NULL
);
266 open_softirq(HI_SOFTIRQ
, tasklet_hi_action
, NULL
);
269 static int ksoftirqd(void * __bind_cpu
)
271 int cpu
= (int) (long) __bind_cpu
;
274 set_user_nice(current
, 19);
275 current
->flags
|= PF_IOTHREAD
;
276 sigfillset(¤t
->blocked
);
278 /* Migrate to the right CPU */
279 set_cpus_allowed(current
, 1UL << cpu
);
280 if (smp_processor_id() != cpu
)
283 sprintf(current
->comm
, "ksoftirqd_CPU%d", cpu
);
285 __set_current_state(TASK_INTERRUPTIBLE
);
288 ksoftirqd_task(cpu
) = current
;
291 if (!softirq_pending(cpu
))
294 __set_current_state(TASK_RUNNING
);
296 while (softirq_pending(cpu
)) {
301 __set_current_state(TASK_INTERRUPTIBLE
);
305 static int __devinit
cpu_callback(struct notifier_block
*nfb
,
306 unsigned long action
,
309 int hotcpu
= (unsigned long)hcpu
;
311 if (action
== CPU_ONLINE
) {
312 if (kernel_thread(ksoftirqd
, hcpu
, CLONE_KERNEL
) < 0) {
313 printk("ksoftirqd for %i failed\n", hotcpu
);
317 while (!ksoftirqd_task(hotcpu
))
324 static struct notifier_block cpu_nfb
= { &cpu_callback
, NULL
, 0 };
326 __init
int spawn_ksoftirqd(void)
328 cpu_callback(&cpu_nfb
, CPU_ONLINE
, (void *)(long)smp_processor_id());
329 register_cpu_notifier(&cpu_nfb
);