1 #ifndef __ASM_SOFTIRQ_H
2 #define __ASM_SOFTIRQ_H
4 #include <asm/atomic.h>
5 #include <asm/hardirq.h>
7 extern unsigned int local_bh_count
[NR_CPUS
];
8 #define in_bh() (local_bh_count[smp_processor_id()] != 0)
/* Bottom halves that are both enabled (bh_mask) and pending (bh_active). */
#define get_active_bhs()	(bh_mask & bh_active)

/* Atomically acknowledge (clear) the given set of pending bottom halves. */
#define clear_active_bhs(x)	atomic_clear_mask((int)(x),&bh_active)
13 extern inline void init_bh(int nr
, void (*routine
)(void))
15 bh_base
[nr
] = routine
;
16 atomic_set(&bh_mask_count
[nr
], 0);
20 extern inline void remove_bh(int nr
)
23 bh_mask
&= ~(1 << nr
);
26 extern inline void mark_bh(int nr
)
28 set_bit(nr
, &bh_active
);
32 #error SMP not supported
35 extern inline void start_bh_atomic(void)
37 local_bh_count
[smp_processor_id()]++;
41 extern inline void end_bh_atomic(void)
44 local_bh_count
[smp_processor_id()]--;
47 /* These are for the irq's testing the lock */
48 #define softirq_trylock(cpu) (in_bh() ? 0 : (local_bh_count[smp_processor_id()]=1))
49 #define softirq_endlock(cpu) (local_bh_count[smp_processor_id()] = 0)
50 #define synchronize_bh() do { } while (0)
/*
 * These use a mask count to correctly handle
 * nested disable/enable calls.
 */
58 extern inline void disable_bh(int nr
)
60 bh_mask
&= ~(1 << nr
);
61 atomic_inc(&bh_mask_count
[nr
]);
65 extern inline void enable_bh(int nr
)
67 if (atomic_dec_and_test(&bh_mask_count
[nr
]))
71 #endif /* __ASM_SOFTIRQ_H */