#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/processor.h>
#include <linux/compiler.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define CLI_STRING	"cli"
#define STI_STRING	"sti"
#define CLI_STI_CLOBBERS
#define CLI_STI_INPUT_ARGS
#endif /* CONFIG_PARAVIRT */
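
/*
 * With CONFIG_PARAVIRT the CLI_STRING/STI_STRING/CLI_STI_* macros come
 * from asm/paravirt.h and route interrupt disabling/enabling through the
 * paravirt hooks; the definitions above are the native fallbacks (plain
 * cli/sti, no extra clobbers or inputs).
 */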

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	return *(volatile signed char *)(&(x)->slock) <= 0;
}
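
/*
 * slock is initialized to 1 (unlocked); the locked decrement in
 * __raw_spin_lock takes it to 0 or below, and __raw_spin_unlock stores 1
 * back.  So "> 0" means free and "<= 0" means held, which is all the
 * test above relies on.
 */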

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile("\n1:\t"
		     LOCK_PREFIX " ; decb %0\n\t"
		     "jns 3f\n"
		     "2:\t"
		     "rep;nop\n\t"
		     "cmpb $0,%0\n\t"
		     "jle 2b\n\t"
		     "jmp 1b\n"
		     "3:\n\t"
		     : "+m" (lock->slock) : : "memory");
}
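
/*
 * Roughly equivalent C for the loop above (illustrative sketch only; the
 * decrement must really be the single atomic "lock; decb"):
 *
 *	while (--lock->slock < 0) {		try to take the lock
 *		while (lock->slock <= 0)	still held: spin with
 *			cpu_relax();		plain reads only
 *	}
 */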

/*
 * It is easier for the lock validator if interrupts are not re-enabled
 * in the middle of a lock-acquire. This is a performance feature anyway
 * so we turn it off:
 *
 * NOTE: there's an irqs-on section here, which normally would have to be
 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
 */
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decb %[slock]\n\t"
		"jns 5f\n"
		"2:\t"
		"testl $0x200, %[flags]\n\t"
		"jz 4f\n\t"
		STI_STRING "\n"
		"3:\t"
		"rep;nop\n\t"
		"cmpb $0, %[slock]\n\t"
		"jle 3b\n\t"
		CLI_STRING "\n\t"
		"jmp 1b\n"
		"4:\t"
		"rep;nop\n\t"
		"cmpb $0, %[slock]\n\t"
		"jg 1b\n\t"
		"jmp 4b\n"
		"5:\n\t"
		: [slock] "+m" (lock->slock)
		: [flags] "r" (flags)
		  CLI_STI_INPUT_ARGS
		: "memory" CLI_STI_CLOBBERS);
}
#endif
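
/*
 * The 0x200 tested above is the IF bit in EFLAGS (X86_EFLAGS_IF): if the
 * caller had interrupts enabled, they are re-enabled (STI_STRING) while
 * we busy-wait and disabled again (CLI_STRING) before retrying the
 * decrement, so interrupt latency does not suffer on a contended lock.
 * Roughly, as an illustrative sketch:
 *
 *	while (--lock->slock < 0) {
 *		if (flags & 0x200)
 *			local_irq_enable();
 *		while (lock->slock <= 0)
 *			cpu_relax();
 *		if (flags & 0x200)
 *			local_irq_disable();
 *	}
 */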

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	char oldval;
	asm volatile(
		"xchgb %b0,%1"
		:"=q" (oldval), "+m" (lock->slock)
		:"0" (0) : "memory");
	return oldval > 0;
}
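
/*
 * trylock works by atomically exchanging 0 (locked) into slock and
 * looking at what was there before; xchg with a memory operand is
 * implicitly locked on x86.  Illustrative sketch:
 *
 *	old = xchg(&lock->slock, 0);
 *	return old > 0;			succeeded only if it was free
 */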

/*
 * __raw_spin_unlock based on writing $1 to the low byte.
 * This method works. Despite all the confusion.
 * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
 * (PPro errata 66, 92)
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
}

#else

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	char oldval = 1;

	asm volatile("xchgb %b0, %1"
		     : "=q" (oldval), "+m" (lock->slock)
		     : "0" (oldval) : "memory");
}

#endif

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}
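
/*
 * __raw_spin_unlock_wait spins until the current holder drops the lock
 * without ever taking it; it is typically reached via spin_unlock_wait()
 * by callers that only need to know that any critical section they
 * observed has finished.
 */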

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 *
 * the helpers are in arch/i386/kernel/semaphore.c
 */
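
/*
 * Worked example of the counter encoding (RW_LOCK_BIAS is 0x01000000 in
 * asm/rwlock.h):
 *
 *	RW_LOCK_BIAS		unlocked
 *	RW_LOCK_BIAS - n	n readers hold the lock (still positive)
 *	0			one writer holds the lock
 *	negative		a writer's subl of the full bias went below
 *				zero, which is what the sign ("contended")
 *				bit means
 */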

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *x)
{
	return (int)(x)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *x)
{
	return (x)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::"a" (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::"a" (rw) : "memory");
}
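
/*
 * Both lock routines above are fast-path/slow-path: the atomic subl
 * optimistically takes the lock, and only if the result shows contention
 * (negative for readers, non-zero for writers) do they call the
 * out-of-line __read_lock_failed/__write_lock_failed helpers, which undo
 * the subtraction and spin.  The "a" constraint passes the rwlock
 * pointer in %eax; the helpers rely on that non-standard calling
 * convention so the fast path does not have to save other registers.
 */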

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
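
/*
 * The trylocks follow the same optimistic pattern: decrement (or
 * subtract the full bias) first, check the result, and roll the change
 * back if the lock turned out to be unavailable.  Brief usage sketch via
 * the generic wrappers (my_rwlock stands for any rwlock_t of the caller):
 *
 *	if (write_trylock(&my_rwlock)) {
 *		... exclusive section ...
 *		write_unlock(&my_rwlock);
 *	}
 */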

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
				 : "+m" (rw->lock) : : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()
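
/*
 * cpu_relax() is "rep; nop" (the PAUSE instruction) on x86, which tells
 * the CPU we are in a spin-wait loop; the relax hooks above are what the
 * generic locking code uses between polling attempts.
 */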

#endif /* __ASM_SPINLOCK_H */