/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * do_bottom_half() runs at normal kernel priority: all interrupts
 * enabled. do_bottom_half() is atomic with respect to itself: a
 * bottom_half handler need not be re-entrant.
 *
 * Fixed a disable_bh()/enable_bh() race (was causing a console lockup)
 * due to bh_mask_count not being handled atomically.
 * Copyright (C) 1998 Andrea Arcangeli
 */
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
/* intr_count died a painless death... -DaveM */
atomic_t bh_mask_count[32];
unsigned long bh_active = 0;
unsigned long bh_mask = 0;
void (*bh_base[32])(void);
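
/*
 * How these globals are driven by the bottom half API (an illustrative
 * sketch, not the exact definitions, which live in <linux/interrupt.h>
 * and the asm headers):
 *
 *	init_bh(nr, routine)	-> bh_base[nr] = routine; enable bit nr in bh_mask
 *	mark_bh(nr)		-> set bit nr in bh_active (handler is now pending)
 *	disable_bh(nr)		-> clear bit nr in bh_mask and atomic_inc(&bh_mask_count[nr])
 *	enable_bh(nr)		-> if atomic_dec_and_test(&bh_mask_count[nr]),
 *				   set bit nr in bh_mask again
 *
 * Keeping bh_mask_count[] an atomic_t is what closes the
 * disable_bh()/enable_bh() race mentioned in the header comment.
 */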
/*
 * This needs to make sure that only one bottom half handler
 * is ever active at a time. We do this without locking by
 * doing an atomic increment on the intr_count, and checking
 * (nonatomically) against 1. Only if it's 1 do we schedule
 * the bottom half.
 *
 * Note that the non-atomicity of the test (as opposed to the
 * actual update) means that the test may fail, and _nobody_
 * runs the handlers if there is a race that makes multiple
 * CPU's get here at the same time. That's ok, we'll run them
 * next time around.
 */
static inline void run_bottom_halves(void)
{
	unsigned long active;
	void (**bh)(void);

	active = get_active_bhs();
	clear_active_bhs(active);
	bh = bh_base;
	do {
		if (active & 1)
			(*bh)();
		bh++;
		active >>= 1;
	} while (active);
}
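
/*
 * get_active_bhs() and clear_active_bhs() are supplied by <asm/softirq.h>.
 * As a rough sketch (assuming the i386-style definitions; architectures
 * may differ), they behave like:
 *
 *	get_active_bhs()	-> bh_active & bh_mask
 *	clear_active_bhs(x)	-> atomically clear the bits in x from bh_active
 *
 * so only bottom halves that are pending *and* not masked off get run,
 * and their pending bits are dropped before the handlers are called.
 */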
asmlinkage void do_bottom_half(void)
{
	int cpu = smp_processor_id();

	if (softirq_trylock(cpu)) {
		if (hardirq_trylock(cpu)) {