Linux-2.3.3 and a short hiatus..
[davej-history.git] / include / asm-alpha / softirq.h
blob: 41ccc29c96e51039b0f038d9b396b37af6eadd83
#ifndef _ALPHA_SOFTIRQ_H
#define _ALPHA_SOFTIRQ_H

#include <linux/stddef.h>
#include <asm/atomic.h>
#include <asm/hardirq.h>
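
/*
 * Per-CPU bottom-half activity count, maintained by the helpers below:
 * non-zero while the CPU is running bottom halves (or, on UP, while it
 * is inside a start_bh_atomic()/end_bh_atomic() section).
 */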
extern unsigned int local_bh_count[NR_CPUS];

#define get_active_bhs()	(bh_mask & bh_active)
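
/*
 * Atomically clear the bits in 'x' from bh_active, using an Alpha
 * load-locked/store-conditional (ldq_l/stq_c) retry loop.  The retry
 * branch lives in .text2 so the common path stays straight-line code.
 */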
static inline void clear_active_bhs(unsigned long x)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	bic %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".section .text2,\"ax\"\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (bh_active)
	:"Ir" (x), "m" (bh_active));
}
extern inline void init_bh(int nr, void (*routine)(void))
{
	bh_base[nr] = routine;
	atomic_set(&bh_mask_count[nr], 0);
	bh_mask |= 1 << nr;
}
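
/* Unregister bottom half 'nr': drop the handler and mask out its bit. */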
extern inline void remove_bh(int nr)
{
	bh_base[nr] = NULL;
	bh_mask &= ~(1 << nr);
}
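
/* Mark bottom half 'nr' pending by setting its bit in bh_active; it gets
   picked up on the next bottom-half run. */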
extern inline void mark_bh(int nr)
{
	set_bit(nr, &bh_active);
}
#ifdef __SMP__

/*
 * The locking mechanism for base handlers, to prevent re-entrancy,
 * is entirely private to an implementation, it should not be
 * referenced at all outside of this file.
 */
extern atomic_t global_bh_lock;
extern atomic_t global_bh_count;

extern void synchronize_bh(void);
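
/*
 * start_bh_atomic() raises global_bh_lock and then waits, via
 * synchronize_bh(), until no CPU is still running a bottom half;
 * end_bh_atomic() simply drops the lock again.
 */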
static inline void start_bh_atomic(void)
{
	atomic_inc(&global_bh_lock);
	synchronize_bh();
}

static inline void end_bh_atomic(void)
{
	atomic_dec(&global_bh_lock);
}
/* These are for the irq's testing the lock */
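/*
 * softirq_trylock: try to become the one CPU running bottom halves by
 * grabbing bit 0 of global_bh_count; back off if start_bh_atomic()
 * currently holds global_bh_lock.  Returns 1 with local_bh_count bumped
 * on success, 0 if bottom halves must wait.
 */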
static inline int softirq_trylock(int cpu)
{
	if (!test_and_set_bit(0,&global_bh_count)) {
		if (atomic_read(&global_bh_lock) == 0) {
			++local_bh_count[cpu];
			return 1;
		}
		clear_bit(0,&global_bh_count);
	}
	return 0;
}
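
/* Done running bottom halves: drop the per-CPU count and release the
   global bit. */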
static inline void softirq_endlock(int cpu)
{
	local_bh_count[cpu]--;
	clear_bit(0,&global_bh_count);
}
#else
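
/*
 * Uniprocessor build: no global lock is needed.  Bh-atomic sections just
 * bump this CPU's local_bh_count; barrier() keeps the compiler from
 * moving the protected code across the update.
 */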
extern inline void start_bh_atomic(void)
{
	local_bh_count[smp_processor_id()]++;
	barrier();
}

extern inline void end_bh_atomic(void)
{
	barrier();
	local_bh_count[smp_processor_id()]--;
}
/* These are for the irq's testing the lock */
#define softirq_trylock(cpu) \
	(local_bh_count[cpu] ? 0 : (local_bh_count[cpu] = 1))

#define softirq_endlock(cpu) \
	(local_bh_count[cpu] = 0)

#define synchronize_bh()	do { } while (0)

#endif	/* SMP */
/*
 * These use a mask count to correctly handle
 * nested disable/enable calls
 */
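/* disable_bh(): mask the bottom half, bump its nesting count, and then
   wait (synchronize_bh) for any instance already running on another CPU
   to finish; on UP the wait is a no-op. */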
extern inline void disable_bh(int nr)
{
	bh_mask &= ~(1 << nr);
	atomic_inc(&bh_mask_count[nr]);
	synchronize_bh();
}
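
/* enable_bh(): undo one disable; the mask bit is restored only when the
   nesting count drops back to zero. */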
extern inline void enable_bh(int nr)
{
	if (atomic_dec_and_test(&bh_mask_count[nr]))
		bh_mask |= 1 << nr;
}

#endif /* _ALPHA_SOFTIRQ_H */