#ifndef _ARCH_X8664_LOCAL_H
#define _ARCH_X8664_LOCAL_H

#include <linux/percpu.h>
#include <asm/atomic.h>

typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set(&(l)->a, (i))

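/*
 * Illustrative usage sketch (not part of this header; the variable name
 * is hypothetical): a local_t is initialised with LOCAL_INIT, and
 * local_read()/local_set() are plain loads and stores, not RMW ops.
 *
 *	static local_t hits = LOCAL_INIT(0);
 *
 *	local_set(&hits, 0);
 *	printk("hits=%ld\n", local_read(&hits));
 */
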
static inline void local_inc(local_t *l)
{
	__asm__ __volatile__(
		"incq %0"
		:"=m" (l->a.counter)
		:"m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
	__asm__ __volatile__(
		"decq %0"
		:"=m" (l->a.counter)
		:"m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
	__asm__ __volatile__(
		"addq %1,%0"
		:"=m" (l->a.counter)
		:"ir" (i), "m" (l->a.counter));
}

static inline void local_sub(long i, local_t *l)
{
	__asm__ __volatile__(
		"subq %1,%0"
		:"=m" (l->a.counter)
		:"ir" (i), "m" (l->a.counter));
}

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int local_sub_and_test(long i, local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"subq %2,%0; sete %1"
		:"=m" (l->a.counter), "=qm" (c)
		:"ir" (i), "m" (l->a.counter) : "memory");
	return c;
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int local_dec_and_test(local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"decq %0; sete %1"
		:"=m" (l->a.counter), "=qm" (c)
		:"m" (l->a.counter) : "memory");
	return c != 0;
}

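/*
 * Illustrative sketch (hypothetical structure and helper names): the
 * dec-and-test form supports the usual "drop a reference, free on
 * zero" pattern, provided all updates happen on one CPU:
 *
 *	if (local_dec_and_test(&obj->uses))
 *		release_obj(obj);
 */
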
/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int local_inc_and_test(local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"incq %0; sete %1"
		:"=m" (l->a.counter), "=qm" (c)
		:"m" (l->a.counter) : "memory");
	return c != 0;
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int local_add_negative(long i, local_t *l)
{
	unsigned char c;

	__asm__ __volatile__(
		"addq %2,%0; sets %1"
		:"=m" (l->a.counter), "=qm" (c)
		:"ir" (i), "m" (l->a.counter) : "memory");
	return c;
}

/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static __inline__ long local_add_return(long i, local_t *l)
{
	long __i = i;
	/* xaddq leaves the old value of the counter in %0 */
	__asm__ __volatile__(
		"xaddq %0, %1;"
		:"+r" (i), "+m" (l->a.counter)
		: : "memory");
	/* old value + original increment == new value */
	return i + __i;
}

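/*
 * Worked example (hypothetical variable): if ctr currently holds 5,
 * then local_add_return(3, &ctr) stores 8 in ctr and returns 8; the
 * xaddq hands back the old value 5, and 5 + 3 is the new value.
 */
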
static __inline__ long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i, l);
}

#define local_inc_return(l)	(local_add_return(1, l))
#define local_dec_return(l)	(local_sub_return(1, l))

#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

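/*
 * Illustrative sketch (hypothetical variable): local_cmpxchg() only
 * stores the new value if the counter still holds the expected old
 * value, and always returns what was there before:
 *
 *	long old = local_read(&ctr);
 *	if (local_cmpxchg(&ctr, old, old + 1) == old)
 *		;		// won the race, counter is now old + 1
 */
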
/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)				\
({								\
	long c, old;						\
	c = local_read(l);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = local_cmpxchg((l), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

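/*
 * Illustrative sketch (hypothetical structure name): built on the
 * cmpxchg loop above, local_inc_not_zero() is the classic "take a
 * reference only if the object is still live" test:
 *
 *	if (!local_inc_not_zero(&obj->uses))
 *		return NULL;	// already hit zero, don't touch obj
 */
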
/* On x86-64 these are better than the atomic variants on SMP kernels
   because they don't use a lock prefix. */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i, l)	local_add((i), (l))
#define __local_sub(i, l)	local_sub((i), (l))

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 *
 * This could be done better if we moved the per cpu data directly
 * after GS.
 */

/* Need to disable preemption for the cpu local counters otherwise we could
   still access a variable of a previous CPU in a non-atomic way. */
#define cpu_local_wrap_v(l)		\
	({ local_t res__;		\
	   preempt_disable();		\
	   res__ = (l);			\
	   preempt_enable();		\
	   res__; })
#define cpu_local_wrap(l)		\
	({ preempt_disable();		\
	   l;				\
	   preempt_enable(); })

#define cpu_local_read(l)	cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
#define cpu_local_set(l, i)	cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
#define cpu_local_inc(l)	cpu_local_wrap(local_inc(&__get_cpu_var(l)))
#define cpu_local_dec(l)	cpu_local_wrap(local_dec(&__get_cpu_var(l)))
#define cpu_local_add(i, l)	cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
#define cpu_local_sub(i, l)	cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))

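/*
 * Illustrative sketch (hypothetical counter name): the cpu_local_*
 * helpers take the per-cpu *variable*, not its address, and bracket
 * the access with preempt_disable()/preempt_enable():
 *
 *	static DEFINE_PER_CPU(local_t, softirq_hits);
 *
 *	cpu_local_inc(softirq_hits);
 *	total += cpu_local_read(softirq_hits);
 */
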
#define __cpu_local_inc(l)	cpu_local_inc(l)
#define __cpu_local_dec(l)	cpu_local_dec(l)
#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))

#endif /* _ARCH_X8664_LOCAL_H */