/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * do_bottom_half() runs at normal kernel priority: all interrupts
 * enabled. do_bottom_half() is atomic with respect to itself: a
 * bottom_half handler need not be re-entrant.
 *
 * Fixed a disable_bh()/enable_bh() race (was causing a console lockup)
 * due to non-atomic handling of bh_mask_count.
 * Copyright (C) 1998 Andrea Arcangeli
 */
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>

#include <asm/io.h>
/* intr_count died a painless death... -DaveM */
atomic_t bh_mask_count[32];
unsigned long bh_active = 0;
unsigned long bh_mask = 0;
void (*bh_base[32])(void);
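
/*
 * How the rest of the kernel uses these (a summary; the accessors
 * live in <linux/interrupt.h>):
 *
 *	bh_base[n]	 - handler installed by init_bh(n, routine)
 *	bh_active	 - one bit per BH, set by mark_bh() when pending
 *	bh_mask		 - one bit per BH, set while that BH is enabled
 *	bh_mask_count[n] - nesting depth of disable_bh(n) calls
 */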
/*
 * This needs to make sure that only one bottom half handler
 * is ever active at a time. We do this without locking by
 * doing an atomic increment on the intr_count (these days the
 * per-CPU softirq_trylock() below), and checking (nonatomically)
 * against 1. Only if it's 1 do we schedule the bottom half.
 *
 * Note that the non-atomicity of the test (as opposed to the
 * actual update) means that the test may fail, and _nobody_
 * runs the handlers if there is a race that makes multiple
 * CPUs get here at the same time. That's ok, we'll run them
 * next time around.
 */
static inline void run_bottom_halves(void)
{
	unsigned long active;
	void (**bh)(void);

	active = get_active_bhs();	/* pending & enabled BH bits */
	clear_active_bhs(active);	/* ack them before running */
	bh = bh_base;
	do {
		if (active & 1)
			(*bh)();
		bh++;
		active >>= 1;
	} while (active);
}
asmlinkage void do_bottom_half(void)
{
	int cpu = smp_processor_id();

	if (softirq_trylock(cpu)) {
		if (hardirq_trylock(cpu)) {
			__sti();	/* handlers run with interrupts enabled */
			run_bottom_halves();
			__cli();
			hardirq_endlock(cpu);
		}
		softirq_endlock(cpu);
	}
}
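
/*
 * For context on the race fix noted in the header: a minimal sketch
 * of how disable_bh()/enable_bh() are expected to pair up with
 * bh_mask_count (the real inlines live in <linux/interrupt.h>; this
 * is an approximation, not a copy):
 *
 *	disable_bh(nr):	bh_mask &= ~(1 << nr);
 *			atomic_inc(&bh_mask_count[nr]);
 *
 *	enable_bh(nr):	if (atomic_dec_and_test(&bh_mask_count[nr]))
 *				bh_mask |= 1 << nr;
 *
 * The atomic counter is what lets disable/enable nest from different
 * contexts without losing the final enable; the old non-atomic count
 * could be corrupted by a disable/enable race, leaving a BH (e.g. the
 * console's) masked off forever.
 */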