/* include/asm-ppc/spinlock.h */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>

/*
 * Simple spin lock operations.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define __raw_spin_is_locked(x)	((x)->slock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

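/*
 * Lock word encoding used by the code below: slock == 0 means unlocked,
 * non-zero means held.  The lock is taken with a lwarx/stwcx. retry
 * loop; the trailing isync keeps accesses in the critical section from
 * being performed before the lock is actually held.
 */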
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
	"b	1f		# __raw_spin_lock\n\
2:	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne+	2b\n\
1:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne-	2b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&lock->slock), "r"(1)
	: "cr0", "memory");
}

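/*
 * Unlock is a plain store of 0; the eieio in front of it keeps the
 * stores done while the lock was held from being reordered past the
 * release, so no lwarx/stwcx. is needed here.
 */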
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("eieio		# __raw_spin_unlock": : :"memory");
	lock->slock = 0;
}

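/*
 * Trylock atomically sets bit 0 of the lock word; it returns non-zero
 * (success) only if the bit was previously clear, i.e. the lock was free.
 */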
#define __raw_spin_trylock(l) (!test_and_set_bit(0,(volatile unsigned long *)(&(l)->slock)))

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

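/*
 * A minimal sketch of the pattern described above; the lock and
 * function names are made up for illustration and are not part of
 * this header.  The writer (process context) takes the irq-safe
 * write lock, while a reader that may run from an interrupt handler
 * takes the plain read lock:
 *
 *	static DEFINE_RWLOCK(example_lock);
 *
 *	static void example_writer(void)
 *	{
 *		unsigned long flags;
 *
 *		write_lock_irqsave(&example_lock, flags);
 *		example_update_shared_data();
 *		write_unlock_irqrestore(&example_lock, flags);
 *	}
 *
 *	static void example_irq_reader(void)
 *	{
 *		read_lock(&example_lock);
 *		example_read_shared_data();
 *		read_unlock(&example_lock);
 *	}
 */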
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

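/*
 * rwlock word encoding used by the code below: 0 means unlocked, a
 * positive value is the number of readers currently holding the lock,
 * and -1 means a writer holds it.  That is why readers may proceed
 * while the word is >= 0 and a writer only while it is 0.
 */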
static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
"2:	lwarx	%0,0,%1		# read_trylock\n\
	addic.	%0,%0,1\n\
	ble-	1f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	2b\n\
	isync\n\
1:"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");

	return tmp > 0;
}

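/*
 * Take a read lock: spin with plain loads while a writer holds the
 * lock (word negative), then atomically bump the reader count; isync
 * keeps the critical section from starting before the lock is held.
 */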
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
	"b	2f		# read_lock\n\
1:	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	blt+	1b\n\
2:	lwarx	%0,0,%1\n\
	addic.	%0,%0,1\n\
	ble-	1b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

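/*
 * Drop a read lock: atomically decrement the reader count; the leading
 * eieio provides the unlock-side memory barrier.
 */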
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
	"eieio			# read_unlock\n\
1:	lwarx	%0,0,%1\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

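/*
 * Try to take the write lock: succeeds (returns non-zero) only if the
 * lock word could be atomically changed from 0 to -1.
 */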
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
"2:	lwarx	%0,0,%1		# write_trylock\n\
	cmpwi	0,%0,0\n\
	bne-	1f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync\n\
1:"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");

	return tmp == 0;
}

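/*
 * Take the write lock: spin with plain loads while the lock word is
 * non-zero (held by readers or another writer), then atomically set it
 * to -1; isync orders the critical section after the acquisition.
 */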
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
	signed int tmp;

	__asm__ __volatile__(
	"b	2f		# write_lock\n\
1:	lwzx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne+	1b\n\
2:	lwarx	%0,0,%1\n\
	cmpwi	0,%0,0\n\
	bne-	1b\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%2,0,%1\n\
	bne-	2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");
}

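/*
 * Release the write lock with a plain store of 0; the eieio in front
 * of it keeps the stores done while the lock was held from being
 * reordered past the release.
 */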
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("eieio		# write_unlock": : :"memory");
	rw->lock = 0;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */