include/asm-x86_64/spinlock.h

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/config.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
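
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * header): callers never use the __raw_* primitives below directly;
 * they go through the generic wrappers in <linux/spinlock.h>.  The lock
 * name here is hypothetical.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section, one CPU at a time ...
 *	spin_unlock(&my_lock);
 */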

#define __raw_spin_is_locked(x) \
		(*(volatile signed int *)(&(x)->slock) <= 0)

#define __raw_spin_lock_string \
	"\n1:\t" \
	"lock ; decl %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpl $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END
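
/*
 * How the lock string works (added commentary): slock starts at 1
 * (unlocked).  "lock ; decl" atomically drops it; if the result is not
 * negative we own the lock and fall through.  If it went negative the
 * lock was already held, so we jump to the out-of-line LOCK_SECTION and
 * spin with "rep;nop" (pause) until slock becomes positive again, then
 * jump back to label 1 and retry the atomic decrement.
 */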

#define __raw_spin_unlock_string \
	"movl $1,%0" \
		:"=m" (lock->slock) : : "memory"

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_lock_string
		:"=m" (lock->slock) : : "memory");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int oldval;

	__asm__ __volatile__(
		"xchgl %0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;
}
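
/*
 * Added note: xchgl is implicitly locked, so this atomically stores 0
 * (locked) into slock and hands back the previous value in oldval.  A
 * previous value greater than zero means the lock was free and is now
 * ours; otherwise it was already held, and writing 0 over an already
 * non-positive value does not change the locked state.
 */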

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		__raw_spin_unlock_string
	);
}

#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 *
 * the helpers are in arch/i386/kernel/semaphore.c
 */
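
/*
 * Worked example of the counter scheme (added commentary; RW_LOCK_BIAS
 * is defined in asm/rwlock.h):
 *
 *	lock value			meaning
 *	RW_LOCK_BIAS			unlocked
 *	RW_LOCK_BIAS - n  (n > 0)	held by n readers
 *	0				held by one writer
 *
 * A reader subtracts 1 and a writer subtracts the whole bias, so the
 * counter only goes negative on a contended attempt, which is why the
 * sign bit can serve as the "contended" test.
 */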

#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
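
/*
 * Added note: a read lock can be taken whenever the counter is still
 * positive (no writer holds the bias), while a write lock can only be
 * taken when the counter is exactly RW_LOCK_BIAS, i.e. no readers and
 * no writer at all.
 */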

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__build_write_lock(rw, "__write_lock_failed");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}
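
/*
 * Added note: the read trylock optimistically takes a reader slot with
 * atomic_dec(); if the counter is still non-negative afterwards no
 * writer holds the bias and the read lock is ours, otherwise the
 * decrement is undone with atomic_inc() and failure is reported.
 */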

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
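
/*
 * Added note: atomic_sub_and_test() subtracts the full bias and returns
 * true only if the result is zero, i.e. the counter was exactly
 * RW_LOCK_BIAS (no readers, no writer).  On failure the bias is added
 * back so the counter is left unchanged.
 */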

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
				: "=m" (rw->lock) : : "memory");
}

#endif /* __ASM_SPINLOCK_H */