#ifndef ASM_X86__SPINLOCK_H
#define ASM_X86__SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif
#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
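/*
 * Illustrative sketch only (added for exposition, never compiled): the same
 * ticket-lock algorithm expressed in plain C with GCC __atomic builtins for
 * the TICKET_SHIFT == 8 layout (head in the low byte, tail in the high byte).
 * The type and function names below are made up for the example; the real
 * implementations follow in hand-written asm.
 */
#if 0
#include <stdint.h>

typedef union {
	uint16_t slock;				/* whole lock word */
	struct { uint8_t head, tail; };		/* little-endian, as on x86 */
} ticket_lock_sketch_t;

static inline void ticket_lock_sketch(ticket_lock_sketch_t *lock)
{
	/* xadd equivalent: fetch the old word and bump the tail by one */
	uint16_t old = __atomic_fetch_add(&lock->slock, 0x0100, __ATOMIC_ACQUIRE);
	uint8_t ticket = old >> 8;		/* our place in the queue */

	/* wait until the head catches up with our ticket */
	while (__atomic_load_n(&lock->head, __ATOMIC_ACQUIRE) != ticket)
		__builtin_ia32_pause();		/* "rep ; nop" */
}

static inline void ticket_unlock_sketch(ticket_lock_sketch_t *lock)
{
	/* incb equivalent: advance the head so the next waiter may enter */
	__atomic_fetch_add(&lock->head, 1, __ATOMIC_RELEASE);
}
#endif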
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}
static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp, new;

	asm volatile("movzwl %2, %0\n\t"
		     "cmpb %h0,%b0\n\t"
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	/* bump the head byte; the next ticket holder sees its turn */
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#else
#define TICKET_SHIFT 16
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
		     "movzwl %w0, %2\n\t"
		     "shrl $16, %0\n\t"
		     "1:\t"
		     "cmpl %0, %2\n\t"
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}
static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"
		     "cmpl %0,%1\n\t"
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	/* bump the 16-bit head; the next ticket holder sees its turn */
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif	/* NR_CPUS < 256 */
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}
static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
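/*
 * Worked example (illustrative): with TICKET_SHIFT == 8, a snapshot of
 * lock->slock equal to 0x0503 means tail = 0x05 and head = 0x03.  The two
 * differ, so __ticket_spin_is_locked() returns 1; (tail - head) & 0xff is 2,
 * which is > 1, so __ticket_spin_is_contended() also returns 1 (one CPU holds
 * ticket 3 and another is already queued with ticket 4).
 */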
#ifdef CONFIG_PARAVIRT
/*
 * Define a virtualization-friendly, old-style lock-byte lock, for use in
 * pv_lock_ops if desired.
 *
 * This differs from the pre-2.6.24 spinlock by always using xchgb
 * rather than decb to take the lock; this allows it to use a
 * zero-initialized lock structure.  It also maintains a 1-byte
 * contention counter, so that we can implement
 * __byte_spin_is_contended.
 */
struct __byte_spinlock {
	s8 lock;
	s8 spinners;
};
static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->lock != 0;
}
static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->spinners != 0;
}
static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	s8 val = 1;

	asm("1: xchgb %1, %0\n"
	    "   test %1,%1\n"
	    "   jz 3f\n"
	    "   " LOCK_PREFIX "incb %2\n"
	    "2: rep;nop\n"
	    "   cmpb $1, %0\n"
	    "   je 2b\n"
	    "   " LOCK_PREFIX "decb %2\n"
	    "   jmp 1b\n"
	    "3:"
	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}
static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %1,%0"
	    : "+m" (bl->lock), "+q" (old) : : "memory");

	return old == 0;
}
static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	smp_wmb();
	bl->lock = 0;
}
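/*
 * Illustrative sketch only (added for exposition, never compiled): the
 * byte-lock protocol above rendered in plain C with GCC __atomic builtins.
 * The names are made up for the example; the asm versions above are what the
 * kernel actually uses.
 */
#if 0
#include <stdint.h>

struct byte_lock_sketch {
	int8_t lock;		/* 0 = free, 1 = held */
	int8_t spinners;	/* CPUs currently waiting */
};

static inline void byte_lock_sketch(struct byte_lock_sketch *bl)
{
	/* xchgb: try to take the lock; a nonzero old value means it was held */
	while (__atomic_exchange_n(&bl->lock, 1, __ATOMIC_ACQUIRE) != 0) {
		/* advertise ourselves as a spinner ... */
		__atomic_fetch_add(&bl->spinners, 1, __ATOMIC_RELAXED);
		/* ... spin with plain loads until the lock looks free ... */
		while (__atomic_load_n(&bl->lock, __ATOMIC_RELAXED))
			__builtin_ia32_pause();
		/* ... stop advertising, then retry the exchange */
		__atomic_fetch_sub(&bl->spinners, 1, __ATOMIC_RELAXED);
	}
}

static inline void byte_unlock_sketch(struct byte_lock_sketch *bl)
{
	/* release with a store, ordered like the smp_wmb() + store above */
	__atomic_store_n(&bl->lock, 0, __ATOMIC_RELEASE);
}
#endif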
#else  /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}
static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}
static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}
static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}
static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
						  unsigned long flags)
{
	__raw_spin_lock(lock);
}
#endif	/* CONFIG_PARAVIRT */
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
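/*
 * Illustrative sketch only (added for exposition, never compiled): the
 * biased-counter scheme described above, in plain C with GCC __atomic
 * builtins.  It assumes RW_LOCK_BIAS is 0x01000000, its value in this
 * kernel's asm/rwlock.h; the function names are made up for the example.
 */
#if 0
static inline int read_trylock_sketch(int *count)
{
	/* a reader costs 1; the count stays >= 0 while no writer is in */
	if (__atomic_sub_fetch(count, 1, __ATOMIC_ACQUIRE) >= 0)
		return 1;
	__atomic_add_fetch(count, 1, __ATOMIC_RELAXED);	/* back off */
	return 0;
}

static inline int write_trylock_sketch(int *count)
{
	/* a writer takes the whole bias; 0 means it now holds it alone */
	if (__atomic_sub_fetch(count, 0x01000000, __ATOMIC_ACQUIRE) == 0)
		return 1;
	__atomic_add_fetch(count, 0x01000000, __ATOMIC_RELAXED);
	return 0;
}
#endif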
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* ASM_X86__SPINLOCK_H */