/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Fixed a disable_bh()/enable_bh() race (was causing a console lockup)
 * due bh_mask_count not atomic handling. Copyright (C) 1998  Andrea Arcangeli
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
12 #include <linux/config.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/interrupt.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear, will it result in better locality
     or will not.
   - These softirqs are not masked by global cli() and start_bh_atomic()
     (by clear reasons). Hence, old parts of code still using global locks
     MUST NOT use softirqs, but insert interfacing routines acquiring
     global locks. F.e. look at BHs implementation.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
   - Bottom halves: globally serialized, grr...
 */
42 /* No separate irq_stat for s390, it is part of PSA */
43 #if !defined(CONFIG_ARCH_S390)
44 irq_cpustat_t irq_stat
[NR_CPUS
];
45 #endif /* CONFIG_ARCH_S390 */
47 static struct softirq_action softirq_vec
[32] __cacheline_aligned
;
49 asmlinkage
void do_softirq()
51 int cpu
= smp_processor_id();
60 mask
= softirq_mask(cpu
);
61 active
= softirq_active(cpu
) & mask
;
64 struct softirq_action
*h
;
67 /* Reset active bitmask before enabling irqs */
68 softirq_active(cpu
) &= ~active
;
84 active
= softirq_active(cpu
);
85 if ((active
&= mask
) != 0)
91 /* Leave with locally disabled hard irqs. It is critical to close
92 * window for infinite recursion, while we help local bh count,
93 * it protected us. Now we are defenceless.
102 static spinlock_t softirq_mask_lock
= SPIN_LOCK_UNLOCKED
;
104 void open_softirq(int nr
, void (*action
)(struct softirq_action
*), void *data
)
109 spin_lock_irqsave(&softirq_mask_lock
, flags
);
110 softirq_vec
[nr
].data
= data
;
111 softirq_vec
[nr
].action
= action
;
113 for (i
=0; i
<NR_CPUS
; i
++)
114 softirq_mask(i
) |= (1<<nr
);
115 spin_unlock_irqrestore(&softirq_mask_lock
, flags
);
121 struct tasklet_head tasklet_vec
[NR_CPUS
] __cacheline_aligned
;
123 static void tasklet_action(struct softirq_action
*a
)
125 int cpu
= smp_processor_id();
126 struct tasklet_struct
*list
;
129 list
= tasklet_vec
[cpu
].list
;
130 tasklet_vec
[cpu
].list
= NULL
;
133 while (list
!= NULL
) {
134 struct tasklet_struct
*t
= list
;
138 if (tasklet_trylock(t
)) {
139 if (atomic_read(&t
->count
) == 0) {
140 clear_bit(TASKLET_STATE_SCHED
, &t
->state
);
144 * talklet_trylock() uses test_and_set_bit that imply
145 * an mb when it returns zero, thus we need the explicit
146 * mb only here: while closing the critical section.
149 smp_mb__before_clear_bit();
157 t
->next
= tasklet_vec
[cpu
].list
;
158 tasklet_vec
[cpu
].list
= t
;
159 __cpu_raise_softirq(cpu
, TASKLET_SOFTIRQ
);
166 struct tasklet_head tasklet_hi_vec
[NR_CPUS
] __cacheline_aligned
;
168 static void tasklet_hi_action(struct softirq_action
*a
)
170 int cpu
= smp_processor_id();
171 struct tasklet_struct
*list
;
174 list
= tasklet_hi_vec
[cpu
].list
;
175 tasklet_hi_vec
[cpu
].list
= NULL
;
178 while (list
!= NULL
) {
179 struct tasklet_struct
*t
= list
;
183 if (tasklet_trylock(t
)) {
184 if (atomic_read(&t
->count
) == 0) {
185 clear_bit(TASKLET_STATE_SCHED
, &t
->state
);
194 t
->next
= tasklet_hi_vec
[cpu
].list
;
195 tasklet_hi_vec
[cpu
].list
= t
;
196 __cpu_raise_softirq(cpu
, HI_SOFTIRQ
);
202 void tasklet_init(struct tasklet_struct
*t
,
203 void (*func
)(unsigned long), unsigned long data
)
208 atomic_set(&t
->count
, 0);
211 void tasklet_kill(struct tasklet_struct
*t
)
214 printk("Attempt to kill tasklet from interrupt\n");
216 while (test_and_set_bit(TASKLET_STATE_SCHED
, &t
->state
)) {
217 current
->state
= TASK_RUNNING
;
219 current
->policy
|= SCHED_YIELD
;
221 } while (test_bit(TASKLET_STATE_SCHED
, &t
->state
));
223 tasklet_unlock_wait(t
);
224 clear_bit(TASKLET_STATE_SCHED
, &t
->state
);
231 static void (*bh_base
[32])(void);
232 struct tasklet_struct bh_task_vec
[32];
234 /* BHs are serialized by spinlock global_bh_lock.
236 It is still possible to make synchronize_bh() as
237 spin_unlock_wait(&global_bh_lock). This operation is not used
238 by kernel now, so that this lock is not made private only
239 due to wait_on_irq().
241 It can be removed only after auditing all the BHs.
243 spinlock_t global_bh_lock
= SPIN_LOCK_UNLOCKED
;
245 static void bh_action(unsigned long nr
)
247 int cpu
= smp_processor_id();
249 if (!spin_trylock(&global_bh_lock
))
252 if (!hardirq_trylock(cpu
))
258 hardirq_endlock(cpu
);
259 spin_unlock(&global_bh_lock
);
263 spin_unlock(&global_bh_lock
);
268 void init_bh(int nr
, void (*routine
)(void))
270 bh_base
[nr
] = routine
;
274 void remove_bh(int nr
)
276 tasklet_kill(bh_task_vec
+nr
);
280 void __init
softirq_init()
285 tasklet_init(bh_task_vec
+i
, bh_action
, i
);
287 open_softirq(TASKLET_SOFTIRQ
, tasklet_action
, NULL
);
288 open_softirq(HI_SOFTIRQ
, tasklet_hi_action
, NULL
);