/*
 * include/asm-i386/softirq.h
 * From Linux 2.3.18pre1 (davej-history.git)
 */
#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H

#include <asm/atomic.h>
#include <asm/hardirq.h>

/* Per-CPU nesting count of bottom-half disables; nonzero means BHs are off. */
extern unsigned int local_bh_count[NR_CPUS];
#define cpu_bh_disable(cpu)     do { local_bh_count[(cpu)]++; barrier(); } while (0)
#define cpu_bh_enable(cpu)      do { barrier(); local_bh_count[(cpu)]--; } while (0)

#define cpu_bh_trylock(cpu)     (local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
#define cpu_bh_endlock(cpu)     (local_bh_count[(cpu)] = 0)

#define local_bh_disable()      cpu_bh_disable(smp_processor_id())
#define local_bh_enable()       cpu_bh_enable(smp_processor_id())

#define get_active_bhs()        (bh_mask & bh_active)
#define clear_active_bhs(x)     atomic_clear_mask((x),&bh_active)
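
/*
 * Editor's example, not part of the original header: a minimal sketch of
 * how the local_bh_disable()/local_bh_enable() pair keeps bottom halves
 * off the current CPU around a short critical section.  The variable
 * my_percpu_stat is hypothetical.
 */
extern unsigned long my_percpu_stat[NR_CPUS];   /* hypothetical */

static inline void my_bump_stat(void)
{
        int cpu = smp_processor_id();

        local_bh_disable();             /* BHs cannot run on this CPU now */
        my_percpu_stat[cpu]++;          /* safe against local bottom halves */
        local_bh_enable();
}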
extern spinlock_t i386_bh_lock;

#ifdef __SMP__

/*
 * The locking mechanism for base handlers, to prevent re-entrancy,
 * is entirely private to an implementation, it should not be
 * referenced at all outside of this file.
 */
extern atomic_t global_bh_lock;
extern atomic_t global_bh_count;

extern void synchronize_bh(void);
static inline void start_bh_atomic(void)
{
        atomic_inc(&global_bh_lock);    /* keep new bottom halves out */
        synchronize_bh();               /* wait for any BH already running */
}

static inline void end_bh_atomic(void)
{
        atomic_dec(&global_bh_lock);
}
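
/*
 * Editor's example, not in the original source: process-context code
 * that must not race with any bottom half can bracket its critical
 * section with start_bh_atomic()/end_bh_atomic().  my_rescan() is a
 * hypothetical function.
 */
extern void my_rescan(void);            /* hypothetical */

static inline void my_reconfigure(void)
{
        start_bh_atomic();      /* block new BHs, wait out running ones */
        my_rescan();            /* no bottom half runs anywhere meanwhile */
        end_bh_atomic();
}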
/* These are for the IRQs testing the lock */
static inline int softirq_trylock(int cpu)
{
        if (cpu_bh_trylock(cpu)) {                      /* BHs not disabled locally? */
                if (!test_and_set_bit(0, &global_bh_count)) {   /* no BH on another CPU? */
                        if (atomic_read(&global_bh_lock) == 0)  /* no bh-atomic section? */
                                return 1;
                        clear_bit(0, &global_bh_count);
                }
                cpu_bh_endlock(cpu);
        }
        return 0;
}

static inline void softirq_endlock(int cpu)
{
        cpu_bh_enable(cpu);
        clear_bit(0, &global_bh_count);
}
#else

extern inline void start_bh_atomic(void)
{
        local_bh_disable();
        barrier();
}

extern inline void end_bh_atomic(void)
{
        barrier();
        local_bh_enable();
}

/* These are for the IRQs testing the lock */
#define softirq_trylock(cpu)    (cpu_bh_trylock(cpu))
#define softirq_endlock(cpu)    (cpu_bh_endlock(cpu))
#define synchronize_bh()        barrier()

#endif  /* SMP */
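
/*
 * Editor's example, not part of the original header: a simplified
 * sketch, modeled on this era's do_bottom_half() in kernel/softirq.c,
 * showing how the irq-exit path uses softirq_trylock()/softirq_endlock()
 * together with get_active_bhs()/clear_active_bhs().
 */
static inline void my_run_bottom_halves(void)
{
        int cpu = smp_processor_id();
        unsigned long active;
        void (**bh)(void);

        if (!softirq_trylock(cpu))
                return;                 /* someone else is running BHs */

        active = get_active_bhs();      /* pending AND enabled */
        clear_active_bhs(active);       /* atomically consume them */
        bh = bh_base;
        do {
                if (active & 1)
                        (*bh)();        /* run this bottom half */
                bh++;
                active >>= 1;
        } while (active);

        softirq_endlock(cpu);
}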
extern inline void init_bh(int nr, void (*routine)(void))
{
        unsigned long flags;

        bh_base[nr] = routine;
        atomic_set(&bh_mask_count[nr], 0);

        spin_lock_irqsave(&i386_bh_lock, flags);
        bh_mask |= 1 << nr;
        spin_unlock_irqrestore(&i386_bh_lock, flags);
}

extern inline void remove_bh(int nr)
{
        unsigned long flags;

        spin_lock_irqsave(&i386_bh_lock, flags);
        bh_mask &= ~(1 << nr);
        spin_unlock_irqrestore(&i386_bh_lock, flags);

        synchronize_bh();       /* wait for a running handler to finish */
        bh_base[nr] = NULL;     /* only then unhook it */
}
extern inline void mark_bh(int nr)
{
        set_bit(nr, &bh_active);
}
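
/*
 * Editor's example, not in the original source: the usual registration
 * pattern.  A driver claims a slot once at init time, then marks the
 * bottom half runnable from its interrupt handler.  MY_BH and
 * my_bh_handler() are hypothetical; real code uses one of the fixed
 * slots from <linux/interrupt.h>, e.g. TQUEUE_BH.
 */
#define MY_BH 30                        /* hypothetical free slot */

static void my_bh_handler(void)
{
        /* runs later, never concurrently with another bottom half */
}

static inline void my_driver_init(void)
{
        init_bh(MY_BH, my_bh_handler);
}

static inline void my_interrupt(void)
{
        mark_bh(MY_BH);         /* schedule my_bh_handler() to run soon */
}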
/*
 * These use a mask count to correctly handle
 * nested disable/enable calls
 */
extern inline void disable_bh(int nr)
{
        unsigned long flags;

        spin_lock_irqsave(&i386_bh_lock, flags);
        bh_mask &= ~(1 << nr);
        atomic_inc(&bh_mask_count[nr]);
        spin_unlock_irqrestore(&i386_bh_lock, flags);
        synchronize_bh();       /* don't return while the handler is running */
}

extern inline void enable_bh(int nr)
{
        unsigned long flags;

        spin_lock_irqsave(&i386_bh_lock, flags);
        if (atomic_dec_and_test(&bh_mask_count[nr]))
                bh_mask |= 1 << nr;     /* last enable: let it run again */
        spin_unlock_irqrestore(&i386_bh_lock, flags);
}
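
/*
 * Editor's example, not part of the original header: because
 * bh_mask_count[nr] counts disables, disable_bh()/enable_bh() pairs
 * nest; the bottom half may run again only after the outermost
 * enable_bh().
 */
static inline void my_nesting_demo(int nr)
{
        disable_bh(nr);         /* count 0 -> 1, bit cleared in bh_mask */
        disable_bh(nr);         /* count 1 -> 2, still disabled */
        enable_bh(nr);          /* count 2 -> 1, still disabled */
        enable_bh(nr);          /* count 1 -> 0, bit restored in bh_mask */
}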
#endif  /* __ASM_SOFTIRQ_H */