/* Source: davej-history.git, Import 2.3.4
 * Path:   include/asm-sparc64/softirq.h
 * Blob:   b8e017d79098aa18fbe123baed0e089d7afbed1e
 */
/* softirq.h: 64-bit Sparc soft IRQ support.
 *
 * Copyright (C) 1997, 1998 David S. Miller (davem@caip.rutgers.edu)
 */
6 #ifndef __SPARC64_SOFTIRQ_H
7 #define __SPARC64_SOFTIRQ_H
9 #include <asm/atomic.h>
10 #include <asm/hardirq.h>
11 #include <asm/system.h> /* for membar() */
/* Per-cpu nesting count of bottom-half disables.  On UP this is a
 * single global; on SMP it lives in the per-cpu data area so each
 * processor tracks its own count.
 */
#ifndef __SMP__
extern unsigned int local_bh_count;
#else
#define local_bh_count	(cpu_data[smp_processor_id()].bh_count)
#endif

/* Nestable: each disable must be paired with an enable. */
#define local_bh_disable()	(local_bh_count++)
#define local_bh_enable()	(local_bh_count--)
/* The locking mechanism for base handlers, to prevent re-entrancy,
 * is entirely private to an implementation, it should not be
 * referenced at all outside of this file.
 */
#define get_active_bhs()	(bh_mask & bh_active)

/* Atomically clear 'mask' bits in bh_active with a casx
 * (compare-and-swap) retry loop; %g5 and %g7 are scratch.
 * Loops until the casx observes no concurrent update.
 */
#define clear_active_bhs(mask) \
	__asm__ __volatile__( \
"1:	ldx	[%1], %%g7\n" \
"	andn	%%g7, %0, %%g5\n" \
"	casx	[%1], %%g7, %%g5\n" \
"	cmp	%%g7, %%g5\n" \
"	bne,pn	%%xcc, 1b\n" \
"	 nop" \
	: /* no outputs */ \
	: "HIr" (mask), "r" (&bh_active) \
	: "g5", "g7", "cc", "memory")
40 extern inline void init_bh(int nr, void (*routine)(void))
42 bh_base[nr] = routine;
43 atomic_set(&bh_mask_count[nr], 0);
44 bh_mask |= 1 << nr;
47 extern inline void remove_bh(int nr)
49 bh_mask &= ~(1 << nr);
50 membar("#StoreStore");
51 bh_base[nr] = NULL;
54 extern inline void mark_bh(int nr)
56 set_bit(nr, &bh_active);
59 #ifndef __SMP__
61 extern inline void start_bh_atomic(void)
63 local_bh_count++;
64 barrier();
67 extern inline void end_bh_atomic(void)
69 barrier();
70 local_bh_count--;
/* These are for the irq's testing the lock.  On UP the "lock" is
 * just local_bh_count: trylock succeeds only when no bottom half is
 * already running (count zero), endlock drops it again.  No cross-cpu
 * synchronization is needed, so synchronize_bh() is a compiler barrier.
 */
#define softirq_trylock(cpu)	(local_bh_count ? 0 : (local_bh_count=1))
#define softirq_endlock(cpu)	(local_bh_count = 0)
#define synchronize_bh()	barrier()
78 #else /* (__SMP__) */
80 extern atomic_t global_bh_lock;
81 extern spinlock_t global_bh_count;
83 extern void synchronize_bh(void);
85 static inline void start_bh_atomic(void)
87 atomic_inc(&global_bh_lock);
88 synchronize_bh();
91 static inline void end_bh_atomic(void)
93 atomic_dec(&global_bh_lock);
96 /* These are for the IRQs testing the lock */
97 static inline int softirq_trylock(int cpu)
99 if (spin_trylock(&global_bh_count)) {
100 if (atomic_read(&global_bh_lock) == 0 &&
101 cpu_data[cpu].bh_count == 0) {
102 ++(cpu_data[cpu].bh_count);
103 return 1;
105 spin_unlock(&global_bh_count);
107 return 0;
110 static inline void softirq_endlock(int cpu)
112 (cpu_data[cpu].bh_count)--;
113 spin_unlock(&global_bh_count);
116 #endif /* (__SMP__) */
/*
 * These use a mask count to correctly handle
 * nested disable/enable calls
 */
122 extern inline void disable_bh(int nr)
124 bh_mask &= ~(1 << nr);
125 atomic_inc(&bh_mask_count[nr]);
126 synchronize_bh();
129 extern inline void enable_bh(int nr)
131 if (atomic_dec_and_test(&bh_mask_count[nr]))
132 bh_mask |= 1 << nr;
135 #endif /* !(__SPARC64_SOFTIRQ_H) */