linux-2.6/linux-mips.git: include/asm-i386/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>

extern int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));
/* It seems that people are forgetting to
 * initialize their spinlocks properly, tsk tsk.
 * Remember to turn this off in 2.4. -ben
 */
#define SPINLOCK_DEBUG	0

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#if SPINLOCK_DEBUG
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
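
/*
 * Typical initialization, as an illustrative sketch only (callers
 * normally go through <linux/spinlock.h> rather than including this
 * file directly; "my_lock" and "my_struct" are made-up names):
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 * or, for a lock embedded in a runtime-allocated structure:
 *
 *	spin_lock_init(&my_struct->lock);
 */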
/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
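
/*
 * Lock byte encoding used below: 1 means unlocked, 0 means held, and a
 * negative value means held with at least one failed lock attempt having
 * decremented it past zero.  Hence the "<= 0" test in spin_is_locked().
 */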
#define spin_is_locked(x)	(*(volatile char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
#define spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"js 2f\n" \
	".section .text.lock,\"ax\"\n" \
	"2:\t" \
	"cmpb $0,%0\n\t" \
	"rep;nop\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	".previous"
/*
 * This works. Despite all the confusion.
 */
#define spin_unlock_string \
	"movb $1,%0"
static inline int spin_trylock(spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (__dummy_lock(lock))
		:"0" (0));
	return oldval > 0;
}
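
/*
 * spin_trylock() usage, as an illustrative sketch: the xchgb writes 0
 * into the lock byte and returns the old value, so a non-zero return
 * means the lock was free and is now ours.
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	} else {
 *		... back off instead of spinning ...
 *	}
 */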
extern inline void spin_lock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	__label__ here;
here:
	if (lock->magic != SPINLOCK_MAGIC) {
		printk("eip: %p\n", &&here);
		BUG();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string
		:"=m" (__dummy_lock(lock)));
}
extern inline void spin_unlock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	if (lock->magic != SPINLOCK_MAGIC)
		BUG();
	if (!spin_is_locked(lock))
		BUG();
#endif
	__asm__ __volatile__(
		spin_unlock_string
		:"=m" (__dummy_lock(lock)));
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
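
/*
 * A sketch of the mix described above (illustrative only; the irq-safe
 * wrappers come from <linux/spinlock.h>, and "my_rwlock" is a made-up
 * name):
 *
 *	reader, possibly running in an interrupt handler:
 *		read_lock(&my_rwlock);
 *		... read shared data ...
 *		read_unlock(&my_rwlock);
 *
 *	writer, process context, must keep interrupts off while holding
 *	the lock so an interrupt reader cannot deadlock on this CPU:
 *		write_lock_irq(&my_rwlock);
 *		... update shared data ...
 *		write_unlock_irq(&my_rwlock);
 */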
typedef struct {
	volatile unsigned int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#if SPINLOCK_DEBUG
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */

/* the spinlock helpers are in arch/i386/kernel/semaphore.S */
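
/*
 * In counter terms (a sketch of the scheme, not of the exact assembler):
 * the lock starts out at RW_LOCK_BIAS.  A reader atomically subtracts 1,
 * a writer atomically subtracts the whole RW_LOCK_BIAS; a caller that
 * finds the count disturbed backs off into the out-of-line helpers in
 * semaphore.S, which undo the change and spin.  Resulting states:
 *
 *	lock == RW_LOCK_BIAS		unlocked
 *	0 < lock < RW_LOCK_BIAS		held by one or more readers
 *	lock == 0			held by a writer
 */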
extern inline void read_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
	if (rw->magic != RWLOCK_MAGIC)
		BUG();
#endif
	__build_read_lock(rw, "__read_lock_failed");
}

extern inline void write_lock(rwlock_t *rw)
{
#if SPINLOCK_DEBUG
	if (rw->magic != RWLOCK_MAGIC)
		BUG();
#endif
	__build_write_lock(rw, "__write_lock_failed");
}
#define read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" (__dummy_lock(&(rw)->lock)))
#define write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" (__dummy_lock(&(rw)->lock)))
extern inline int write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
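
/*
 * How the trylock above detects contention (a sketch): subtracting
 * RW_LOCK_BIAS reaches exactly zero only when the counter was at its
 * unlocked value, i.e. no readers and no writer.  Any other value means
 * the lock is busy, so the bias is added back and 0 is returned.
 *
 *	Illustrative use:
 *		if (write_trylock(&my_rwlock)) {
 *			... exclusive access ...
 *			write_unlock(&my_rwlock);
 *		}
 */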

#endif /* __ASM_SPINLOCK_H */