Disintegrate asm/system.h for PA-RISC
[linux-2.6.git] / arch/parisc/include/asm/spinlock.h
blob 804aa28ab1d689fbf47c4a76f128d6c83072cdc5
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/processor.h>
#include <asm/spinlock_types.h>
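
/* ldcw ("load and clear word") is the only atomic read-modify-write
 * primitive PA-RISC provides: it atomically loads a word and stores
 * zero to it.  A lock word therefore holds 1 when free and 0 when
 * taken, the inverse of most architectures, and __ldcw_align() is
 * needed because ldcw requires a 16-byte aligned operand. */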
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}
#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
#define arch_spin_unlock_wait(x) \
		do { cpu_relax(); } while (arch_spin_is_locked(x))
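
/* While contended, spin on a plain read of the lock word (the inner
 * loop below) instead of repeating the ldcw, and only retry the atomic
 * once the word looks free.  If the caller's saved PSW had interrupts
 * enabled (PSW_SM_I set in flags), re-enable them while spinning so a
 * long wait does not add to interrupt latency. */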
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();
}
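
/* Releasing the lock is a plain store of 1: only acquisition needs the
 * ldcw atomic.  The mb() barriers keep the critical section's memory
 * accesses from being reordered past the store. */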
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}
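
/* Illustrative usage sketch (not part of this file); it assumes the
 * __ARCH_SPIN_LOCK_UNLOCKED initialiser from asm/spinlock_types.h:
 *
 *	arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;
 *	if (arch_spin_trylock(&l)) {
 *		... critical section ...
 *		arch_spin_unlock(&l);
 *	}
 */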
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an
 * indefinite time by readers.  With care, they can also be taken in
 * interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */
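
/* Counter states, as used below:
 *	rw->counter > 0:   held by that many readers
 *	rw->counter == 0:  free
 *	rw->counter < 0:   write-locked (the writer sets it to -1)
 */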
/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter++;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter--;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
 retry:
	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		rw->counter++;
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		return 1;
	}

	local_irq_restore(flags);
	/* If write-locked, we fail to acquire the lock */
	if (rw->counter < 0)
		return 0;

	/* Wait until we have a realistic chance at the lock */
	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
		cpu_relax();

	goto retry;
}
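
/* A writer cannot simply hold rw->lock until the counter drains:
 * readers need that lock to decrement the counter.  So arch_write_lock()
 * below drops the lock (and restores interrupts) while it waits, then
 * retries from the top. */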
/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long flags;
 retry:
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);

	if (rw->counter != 0) {
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */
	mb();
	local_irq_restore(flags);
}
static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->counter = 0;
	arch_spin_unlock(&rw->lock);
}
/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked.  Oh well. */
			arch_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
	return rw->counter >= 0;
}
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
	return !rw->counter;
}
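
/* Both *_can_lock() results are unsynchronised snapshots of the counter
 * and can be stale by the time the caller acts on them; treat them as
 * hints, not guarantees. */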
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */