Import 2.3.18pre1
[davej-history.git] include/asm-ppc/softirq.h

#ifndef __ASM_SOFTIRQ_H
#define __ASM_SOFTIRQ_H

#include <asm/atomic.h>
#include <asm/hardirq.h>

#define get_active_bhs()	(bh_mask & bh_active)
#define clear_active_bhs(x)	atomic_clear_mask((x), &bh_active)

extern unsigned int ppc_local_bh_count[NR_CPUS];
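
/*
 * Bottom-half registration: init_bh() installs a handler in bh_base[]
 * and unmasks its slot, remove_bh() masks the slot and clears the
 * handler, and mark_bh() sets the handler's bit in bh_active so it is
 * picked up (if still unmasked) the next time pending bottom halves
 * are run.
 */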
extern inline void init_bh(int nr, void (*routine)(void))
{
        bh_base[nr] = routine;
        atomic_set(&bh_mask_count[nr], 0);
        bh_mask |= 1 << nr;
}

extern inline void remove_bh(int nr)
{
        bh_mask &= ~(1 << nr);
        wmb();
        bh_base[nr] = NULL;
}

extern inline void mark_bh(int nr)
{
        set_bit(nr, &bh_active);
}
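
/*
 * On SMP the exclusion between bottom halves and their "atomic"
 * sections is built on a global lock/count pair shared by all CPUs;
 * on UP the same interface collapses to per-CPU counters and
 * compiler barriers.
 */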
#ifdef __SMP__

/*
 * The locking mechanism for base handlers, to prevent re-entrancy,
 * is entirely private to an implementation; it should not be
 * referenced at all outside of this file.
 */
extern atomic_t global_bh_lock;
extern atomic_t global_bh_count;

extern void synchronize_bh(void);
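
/*
 * start_bh_atomic() raises global_bh_lock and then waits, via
 * synchronize_bh(), for any bottom half already running on another
 * CPU to finish before the caller proceeds; end_bh_atomic() simply
 * drops the lock again.
 */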
static inline void start_bh_atomic(void)
{
        atomic_inc(&global_bh_lock);
        synchronize_bh();
}

static inline void end_bh_atomic(void)
{
        atomic_dec(&global_bh_lock);
}
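
/*
 * Illustrative only (not part of the original header): a driver that
 * must keep bottom halves from running while it updates shared state
 * would bracket the critical section like
 *
 *	start_bh_atomic();
 *	... touch data also used by a bottom half ...
 *	end_bh_atomic();
 */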

/* These are for the IRQs testing the lock */
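/*
 * softirq_trylock() returns 1 only when this CPU is not already in a
 * bottom half, it wins the race for the single global_bh_count bit,
 * and no CPU currently holds global_bh_lock; on any failure it backs
 * out completely and returns 0 so the caller can retry later.
 */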
static inline int softirq_trylock(int cpu)
{
        if (ppc_local_bh_count[cpu] == 0) {
                ppc_local_bh_count[cpu] = 1;
                if (!test_and_set_bit(0, &global_bh_count)) {
                        mb();
                        if (atomic_read(&global_bh_lock) == 0)
                                return 1;
                        clear_bit(0, &global_bh_count);
                }
                ppc_local_bh_count[cpu] = 0;
                mb();
        }
        return 0;
}
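
/*
 * softirq_endlock() undoes a successful softirq_trylock(): it drops
 * this CPU's bottom-half count and releases the global_bh_count bit.
 */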
static inline void softirq_endlock(int cpu)
{
        mb();
        ppc_local_bh_count[cpu]--;
        clear_bit(0, &global_bh_count);
}

#else

extern inline void start_bh_atomic(void)
{
        ppc_local_bh_count[smp_processor_id()]++;
        barrier();
}

extern inline void end_bh_atomic(void)
{
        barrier();
        ppc_local_bh_count[smp_processor_id()]--;
}

/* These are for the IRQs testing the lock */
#define softirq_trylock(cpu)	(ppc_local_bh_count[cpu] ? 0 : (ppc_local_bh_count[cpu] = 1))
#define softirq_endlock(cpu)	(ppc_local_bh_count[cpu] = 0)
#define synchronize_bh()	barrier()

#endif /* SMP */
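
/*
 * local_bh_disable()/local_bh_enable() bump this CPU's bottom-half
 * count so that softirq_trylock() fails here, keeping bottom halves
 * from running on the local CPU while the count is non-zero.
 */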
#define local_bh_disable()	(ppc_local_bh_count[smp_processor_id()]++)
#define local_bh_enable()	(ppc_local_bh_count[smp_processor_id()]--)

/*
 * These use a mask count to correctly handle
 * nested disable/enable calls
 */
extern inline void disable_bh(int nr)
{
        bh_mask &= ~(1 << nr);
        atomic_inc(&bh_mask_count[nr]);
        synchronize_bh();
}

extern inline void enable_bh(int nr)
{
        if (atomic_dec_and_test(&bh_mask_count[nr]))
                bh_mask |= 1 << nr;
}
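
/*
 * Illustrative only (not part of the original header): because a
 * mask count is kept per bottom half, nested calls pair up:
 *
 *	disable_bh(TIMER_BH);
 *	disable_bh(TIMER_BH);
 *	enable_bh(TIMER_BH);	- bottom half still masked
 *	enable_bh(TIMER_BH);	- unmasked again here
 */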

#endif /* __ASM_SOFTIRQ_H */