/* include/linux/brlock.h */
#ifndef __LINUX_BRLOCK_H
#define __LINUX_BRLOCK_H
/*
 * 'Big Reader' read-write spinlocks.
 *
 * Super-fast read/write locks, with a write-side penalty. The point
 * is to have a per-CPU read/write lock. Readers lock their CPU-local
 * read lock, writers must lock all locks to get write access. These
 * CPU-read-write locks are semantically identical to normal rwlocks.
 * Memory usage is higher as well. (NR_CPUS*L1_CACHE_BYTES bytes)
 *
 * The most important feature is that these spinlocks do not cause
 * cacheline ping-pong in the 'mostly read-only data' case.
 *
 * Copyright 2000, Ingo Molnar <mingo@redhat.com>
 *
 * Registry idea and naming [ crucial! :-) ] by:
 *
 *      David S. Miller <davem@redhat.com>
 *
 * David has an implementation that doesn't use atomic operations in
 * the read branch via memory-ordering tricks - I guess we need to
 * split this up into a per-arch thing? The atomicity issue is a
 * secondary item in profiles, at least on x86 platforms.
 *
 * The atomic-op version's overhead is indeed a big deal on
 * load-locked/store-conditional CPUs (Alpha/MIPS/PPC) and
 * compare-and-swap CPUs (Sparc64). So we control which
 * implementation to use with a __BRLOCK_USE_ATOMICS define. -DaveM
 */
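
/*
 * A minimal usage sketch (hypothetical caller; BR_NETPROTO_LOCK is
 * one of the indices registered below):
 *
 *      br_read_lock(BR_NETPROTO_LOCK);
 *      ... read the mostly-read-only data ...
 *      br_read_unlock(BR_NETPROTO_LOCK);
 *
 * Writers take every CPU's lock and are correspondingly expensive:
 *
 *      br_write_lock(BR_NETPROTO_LOCK);
 *      ... modify the data ...
 *      br_write_unlock(BR_NETPROTO_LOCK);
 */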

/* Register bigreader lock indices here. */
enum brlock_indices {
        BR_GLOBALIRQ_LOCK,
        BR_NETPROTO_LOCK,

        __BR_END
};

#include <linux/config.h>

#ifdef CONFIG_SMP

#include <linux/cache.h>
#include <linux/spinlock.h>

#if defined(__i386__) || defined(__ia64__)
#define __BRLOCK_USE_ATOMICS
#else
#undef __BRLOCK_USE_ATOMICS
#endif

#ifdef __BRLOCK_USE_ATOMICS
typedef rwlock_t        brlock_read_lock_t;
#else
typedef unsigned int    brlock_read_lock_t;
#endif

/*
 * Align the last allocated index to the next cacheline:
 */
#define __BR_IDX_MAX \
        (((sizeof(brlock_read_lock_t)*__BR_END + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) / sizeof(brlock_read_lock_t))
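
/*
 * Worked example (values assumed for illustration): with __BR_END == 2,
 * sizeof(brlock_read_lock_t) == 4 and SMP_CACHE_BYTES == 32, this is
 * ((8 + 31) & ~31) / 4 == 32 / 4 == 8 entries, so each CPU's row in
 * __brlock_array is padded out to a full cacheline and no two CPUs'
 * read locks ever share a line.
 */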

extern brlock_read_lock_t __brlock_array[NR_CPUS][__BR_IDX_MAX];

#ifndef __BRLOCK_USE_ATOMICS
struct br_wrlock {
        spinlock_t lock;
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));

extern struct br_wrlock __br_write_locks[__BR_IDX_MAX];
#endif

extern void __br_lock_usage_bug (void);
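
/*
 * Note: __br_lock_usage_bug() is intentionally never defined. For a
 * valid compile-time-constant index the 'idx >= __BR_END' check below
 * is optimized away; an invalid index leaves the call in place and
 * produces a link-time error rather than silent memory corruption.
 */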

#ifdef __BRLOCK_USE_ATOMICS

static inline void br_read_lock (enum brlock_indices idx)
{
        /*
         * This causes a link-time bug message if an
         * invalid index is used:
         */
        if (idx >= __BR_END)
                __br_lock_usage_bug();

        read_lock(&__brlock_array[smp_processor_id()][idx]);
}

static inline void br_read_unlock (enum brlock_indices idx)
{
        if (idx >= __BR_END)
                __br_lock_usage_bug();

        read_unlock(&__brlock_array[smp_processor_id()][idx]);
}

#else /* ! __BRLOCK_USE_ATOMICS */

static inline void br_read_lock (enum brlock_indices idx)
{
        unsigned int *ctr;
        spinlock_t *lock;

        /*
         * This causes a link-time bug message if an
         * invalid index is used:
         */
        if (idx >= __BR_END)
                __br_lock_usage_bug();

        ctr = &__brlock_array[smp_processor_id()][idx];
        lock = &__br_write_locks[idx].lock;
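        /*
         * Optimistic read protocol: advertise this CPU as a reader by
         * bumping its counter, then re-check the writer's spinlock. If
         * a writer is active, back out, wait for it to finish and try
         * again. The barriers keep the counter update and the lock
         * check from being reordered against each other.
         */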
again:
        (*ctr)++;
        rmb();
        if (spin_is_locked(lock)) {
                (*ctr)--;
                rmb();
                while (spin_is_locked(lock))
                        barrier();
                goto again;
        }
}

static inline void br_read_unlock (enum brlock_indices idx)
{
        unsigned int *ctr;

        if (idx >= __BR_END)
                __br_lock_usage_bug();

        ctr = &__brlock_array[smp_processor_id()][idx];

        wmb();
        (*ctr)--;
}
#endif /* __BRLOCK_USE_ATOMICS */

/* write path not inlined - it's rare and larger */

extern void FASTCALL(__br_write_lock (enum brlock_indices idx));
extern void FASTCALL(__br_write_unlock (enum brlock_indices idx));
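
/*
 * As the design notes above say, a writer must lock every CPU's read
 * lock, so the out-of-line write path does O(NR_CPUS) work - cheap
 * readers are paid for with expensive, rare writers.
 */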

static inline void br_write_lock (enum brlock_indices idx)
{
        if (idx >= __BR_END)
                __br_lock_usage_bug();
        __br_write_lock(idx);
}

static inline void br_write_unlock (enum brlock_indices idx)
{
        if (idx >= __BR_END)
                __br_lock_usage_bug();
        __br_write_unlock(idx);
}

#else /* ! CONFIG_SMP */
# define br_read_lock(idx)      ((void)(idx))
# define br_read_unlock(idx)    ((void)(idx))
# define br_write_lock(idx)     ((void)(idx))
# define br_write_unlock(idx)   ((void)(idx))
#endif /* CONFIG_SMP */

/*
 * Now enumerate all of the possible sw/hw IRQ protected
 * versions of the interfaces.
 */
#define br_read_lock_irqsave(idx, flags) \
        do { local_irq_save(flags); br_read_lock(idx); } while (0)

#define br_read_lock_irq(idx) \
        do { local_irq_disable(); br_read_lock(idx); } while (0)

#define br_read_lock_bh(idx) \
        do { local_bh_disable(); br_read_lock(idx); } while (0)

#define br_write_lock_irqsave(idx, flags) \
        do { local_irq_save(flags); br_write_lock(idx); } while (0)

#define br_write_lock_irq(idx) \
        do { local_irq_disable(); br_write_lock(idx); } while (0)

#define br_write_lock_bh(idx) \
        do { local_bh_disable(); br_write_lock(idx); } while (0)

#define br_read_unlock_irqrestore(idx, flags) \
        do { br_read_unlock(idx); local_irq_restore(flags); } while (0)

#define br_read_unlock_irq(idx) \
        do { br_read_unlock(idx); local_irq_enable(); } while (0)

#define br_read_unlock_bh(idx) \
        do { br_read_unlock(idx); local_bh_enable(); } while (0)

#define br_write_unlock_irqrestore(idx, flags) \
        do { br_write_unlock(idx); local_irq_restore(flags); } while (0)

#define br_write_unlock_irq(idx) \
        do { br_write_unlock(idx); local_irq_enable(); } while (0)

#define br_write_unlock_bh(idx) \
        do { br_write_unlock(idx); local_bh_enable(); } while (0)
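
/*
 * Example (hypothetical caller): code that also runs from softirq
 * context pairs the _bh variants, e.g.
 *
 *      br_read_lock_bh(BR_NETPROTO_LOCK);
 *      ... walk data also touched by softirqs ...
 *      br_read_unlock_bh(BR_NETPROTO_LOCK);
 */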

#endif /* __LINUX_BRLOCK_H */