/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * do_bottom_half() runs at normal kernel priority: all interrupts
 * enabled. do_bottom_half() is atomic with respect to itself: a
 * bottom_half handler need not be re-entrant.
 */
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
/* intr_count died a painless death... -DaveM */

int bh_mask_count[32];
unsigned long bh_active = 0;
unsigned long bh_mask = 0;
void (*bh_base[32])(void);
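
/*
 * For context (not part of this file): in this kernel generation the
 * arrays above are driven from <linux/interrupt.h>.  Drivers install a
 * handler with init_bh() and request a run with mark_bh(), roughly
 * along the lines of the sketch below (an approximation for
 * illustration; the exact definitions live in interrupt.h):
 *
 *	extern inline void init_bh(int nr, void (*routine)(void))
 *	{
 *		bh_base[nr] = routine;
 *		bh_mask_count[nr] = 0;
 *		bh_mask |= 1 << nr;
 *	}
 *
 *	extern inline void mark_bh(int nr)
 *	{
 *		set_bit(nr, &bh_active);
 *	}
 */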
/*
 * This needs to make sure that only one bottom half handler
 * is ever active at a time. We do this without locking by
 * doing an atomic increment on the intr_count, and checking
 * (nonatomically) against 1. Only if it's 1 do we schedule
 * the bottom half.
 *
 * Note that the non-atomicity of the test (as opposed to the
 * actual update) means that the test may fail, and _nobody_
 * runs the handlers if there is a race that makes multiple
 * CPU's get here at the same time. That's ok, we'll run them
 * next time around.
 */
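
/*
 * get_active_bhs() and clear_active_bhs() used below come from the
 * architecture headers; on i386 in this era they are approximately
 * (an assumed sketch for illustration, see <asm/softirq.h>):
 *
 *	#define get_active_bhs()	(bh_mask & bh_active)
 *	#define clear_active_bhs(x)	atomic_clear_mask((x), &bh_active)
 */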
static inline void run_bottom_halves(void)
{
	unsigned long active;
	void (**bh)(void);

	active = get_active_bhs();
	clear_active_bhs(active);
	bh = bh_base;
	do {
		if (active & 1)
			(*bh)();
		bh++;
		active >>= 1;
	} while (active);
}
asmlinkage void do_bottom_half(void)
{
	int cpu = smp_processor_id();

	/* Only one CPU at a time runs bottom halves, and never while
	   this CPU is inside a hard interrupt handler. */
	if (softirq_trylock(cpu)) {
		if (hardirq_trylock(cpu)) {
			__sti();	/* handlers run with interrupts enabled */
			run_bottom_halves();
			hardirq_endlock(cpu);
		}
		softirq_endlock(cpu);
	}
}
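
/*
 * Illustrative driver-side usage (a sketch only; my_bh is a hypothetical
 * handler name, and the slot used must be one of the bottom-half numbers
 * enumerated in <linux/interrupt.h>):
 *
 *	static void my_bh(void)
 *	{
 *		... deferred work runs here, with interrupts enabled ...
 *	}
 *
 *	init_bh(CONSOLE_BH, my_bh);	 once, at driver init time
 *	mark_bh(CONSOLE_BH);		 from the hard interrupt handler
 *
 * do_bottom_half() will then invoke my_bh() on the next opportunity.
 */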