RT-AC56 3.0.0.4.374.37 core
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / arch / cris / include / arch-v32 / arch / spinlock.h
blobf171a6600fbcac6424376d65aeb35424d57e2b6e
1 #ifndef __ASM_ARCH_SPINLOCK_H
2 #define __ASM_ARCH_SPINLOCK_H
4 #include <linux/spinlock_types.h>
6 #define RW_LOCK_BIAS 0x01000000
8 extern void cris_spin_unlock(void *l, int val);
9 extern void cris_spin_lock(void *l);
10 extern int cris_spin_trylock(void *l);
12 static inline int arch_spin_is_locked(arch_spinlock_t *x)
14 return *(volatile signed char *)(&(x)->slock) <= 0;
17 static inline void arch_spin_unlock(arch_spinlock_t *lock)
19 __asm__ volatile ("move.d %1,%0" \
20 : "=m" (lock->slock) \
21 : "r" (1) \
22 : "memory");
25 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
27 while (arch_spin_is_locked(lock))
28 cpu_relax();
31 static inline int arch_spin_trylock(arch_spinlock_t *lock)
33 return cris_spin_trylock((void *)&lock->slock);
36 static inline void arch_spin_lock(arch_spinlock_t *lock)
38 cris_spin_lock((void *)&lock->slock);
41 static inline void
42 arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
44 arch_spin_lock(lock);
48 * Read-write spinlocks, allowing multiple readers
49 * but only one writer.
51 * NOTE! it is quite common to have readers in interrupts
52 * but no interrupt writers. For those circumstances we
53 * can "mix" irq-safe locks - any writer needs to get an
54 * irq-safe write-lock, but readers can get non-irqsafe
55 * read-locks.
59 static inline int arch_read_can_lock(arch_rwlock_t *x)
61 return (int)(x)->lock > 0;
64 static inline int arch_write_can_lock(arch_rwlock_t *x)
66 return (x)->lock == RW_LOCK_BIAS;
69 static inline void arch_read_lock(arch_rwlock_t *rw)
71 arch_spin_lock(&rw->slock);
72 while (rw->lock == 0);
73 rw->lock--;
74 arch_spin_unlock(&rw->slock);
77 static inline void arch_write_lock(arch_rwlock_t *rw)
79 arch_spin_lock(&rw->slock);
80 while (rw->lock != RW_LOCK_BIAS);
81 rw->lock = 0;
82 arch_spin_unlock(&rw->slock);
85 static inline void arch_read_unlock(arch_rwlock_t *rw)
87 arch_spin_lock(&rw->slock);
88 rw->lock++;
89 arch_spin_unlock(&rw->slock);
92 static inline void arch_write_unlock(arch_rwlock_t *rw)
94 arch_spin_lock(&rw->slock);
95 while (rw->lock != RW_LOCK_BIAS);
96 rw->lock = RW_LOCK_BIAS;
97 arch_spin_unlock(&rw->slock);
100 static inline int arch_read_trylock(arch_rwlock_t *rw)
102 int ret = 0;
103 arch_spin_lock(&rw->slock);
104 if (rw->lock != 0) {
105 rw->lock--;
106 ret = 1;
108 arch_spin_unlock(&rw->slock);
109 return ret;
112 static inline int arch_write_trylock(arch_rwlock_t *rw)
114 int ret = 0;
115 arch_spin_lock(&rw->slock);
116 if (rw->lock == RW_LOCK_BIAS) {
117 rw->lock = 0;
118 ret = 1;
120 arch_spin_unlock(&rw->slock);
121 return 1;
/*
 * Flags-aware lock variants just ignore the flags on this architecture.
 *
 * Bug fixed: these were previously defined under the stale pre-2.6.33
 * names _raw_read_lock_flags/_raw_write_lock_flags, which the generic
 * rwlock code no longer consults (it looks for arch_*_lock_flags and,
 * finding nothing, falls back to its own defaults) — i.e. they were
 * dead defines.  Use the current names so these definitions take effect.
 */
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

/* Hint the CPU while spinning in the generic lock slow paths. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
131 #endif /* __ASM_ARCH_SPINLOCK_H */