Import 2.3.6
[davej-history.git] / include/asm-arm/softirq.h
blob 6bad79dd4a7d452de330572884f3cce395b627dd
#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H

#include <asm/atomic.h>
#include <asm/hardirq.h>
extern unsigned int local_bh_count[NR_CPUS];
#define in_bh()                 (local_bh_count[smp_processor_id()] != 0)

#define get_active_bhs()        (bh_mask & bh_active)
#define clear_active_bhs(x)     atomic_clear_mask((int)(x),&bh_active)
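/*
 * init_bh() installs `routine` as the handler for bottom-half slot `nr`
 * (the bh_base[]/bh_mask/bh_mask_count/bh_active bookkeeping is declared
 * in the generic kernel headers) and unmasks the slot; remove_bh() tears
 * the slot down again.
 */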
extern inline void init_bh(int nr, void (*routine)(void))
{
        bh_base[nr] = routine;
        atomic_set(&bh_mask_count[nr], 0);
        bh_mask |= 1 << nr;
}

extern inline void remove_bh(int nr)
{
        bh_base[nr] = NULL;
        bh_mask &= ~(1 << nr);
}
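/*
 * mark_bh() flags bottom half `nr` as pending; it is picked up the next
 * time the core checks get_active_bhs() and acknowledged with
 * clear_active_bhs().
 */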
extern inline void mark_bh(int nr)
{
        set_bit(nr, &bh_active);
}

#ifdef __SMP__
#error SMP not supported
#else
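/*
 * start_bh_atomic()/end_bh_atomic() bump and drop this CPU's
 * local_bh_count so bottom halves stay out of the enclosed region; the
 * counter lets the sections nest. Only the uniprocessor case is
 * implemented here (the SMP branch above is a hard #error).
 */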
extern inline void start_bh_atomic(void)
{
        local_bh_count[smp_processor_id()]++;
        barrier();
}

extern inline void end_bh_atomic(void)
{
        barrier();
        local_bh_count[smp_processor_id()]--;
}
/* These are for the irqs testing the lock */
#define softirq_trylock(cpu)    (in_bh() ? 0 : (local_bh_count[smp_processor_id()]=1))
#define softirq_endlock(cpu)    (local_bh_count[smp_processor_id()] = 0)
#define synchronize_bh()        do { } while (0)
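/*
 * On this uniprocessor build the "lock" is just local_bh_count:
 * softirq_trylock() returns 0 if a bottom half is already running,
 * otherwise claims it by setting the count to 1; softirq_endlock()
 * releases it, and synchronize_bh() is a no-op because no other CPU
 * can be executing a bottom half concurrently.
 */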
#endif  /* SMP */

/*
 * These use a mask count to correctly handle
 * nested disable/enable calls
 */
extern inline void disable_bh(int nr)
{
        bh_mask &= ~(1 << nr);
        atomic_inc(&bh_mask_count[nr]);
        synchronize_bh();
}

extern inline void enable_bh(int nr)
{
        if (atomic_dec_and_test(&bh_mask_count[nr]))
                bh_mask |= 1 << nr;
}
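/*
 * Worked nesting trace (illustrative, not part of the original header):
 *
 *      disable_bh(nr);   mask bit cleared, bh_mask_count[nr] == 1
 *      disable_bh(nr);   still masked,     bh_mask_count[nr] == 2
 *      enable_bh(nr);    count drops to 1, slot stays masked
 *      enable_bh(nr);    count hits 0, mask bit is restored
 */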
#endif  /* __ASM_SOFTIRQ_H */
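For context, a minimal sketch of how a driver of this era might use these primitives. The slot number MY_DRIVER_BH, the handler names and the cleanup ordering are illustrative assumptions only; real drivers owned one of the fixed *_BH slots enumerated in <linux/interrupt.h>, which also declares the bh_base[], bh_mask, bh_mask_count[] and bh_active bookkeeping referenced above.

#include <linux/interrupt.h>            /* *_BH slots, bh_* bookkeeping */

#define MY_DRIVER_BH    31              /* hypothetical slot; real drivers used a fixed *_BH entry */

static void my_bh_handler(void)
{
        /* deferred work: runs outside the hard interrupt path */
}

static void my_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        /* ... acknowledge the hardware, save minimal state ... */
        mark_bh(MY_DRIVER_BH);          /* flag the bottom half as pending */
}

static int my_driver_init(void)
{
        init_bh(MY_DRIVER_BH, my_bh_handler);   /* install handler, unmask slot */
        return 0;
}

static void my_driver_cleanup(void)
{
        disable_bh(MY_DRIVER_BH);       /* mask the slot before tearing it down */
        remove_bh(MY_DRIVER_BH);
}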