/* linux-2.6.12-rc2: include/asm-generic/local.h */

#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H

#include <linux/config.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/types.h>

/* An unsigned long type for operations which are atomic for a single
 * CPU.  Usually used in combination with per-cpu variables. */
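
/*
 * A minimal usage sketch (hypothetical names, not part of this header):
 * a per-CPU hit counter.  Preemption is disabled so we stay on the CPU
 * whose copy we are updating.
 */
#if 0	/* illustrative only, not compiled */
static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

static void count_hit(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(hits));
	preempt_enable();
}
#endif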

#if BITS_PER_LONG == 32

/* Implement in terms of atomics. */

/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct
{
	atomic_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_INIT(i) }

#define local_read(l)	((unsigned long)atomic_read(&(l)->a))
#define local_set(l,i)	atomic_set((&(l)->a),(i))
#define local_inc(l)	atomic_inc(&(l)->a)
#define local_dec(l)	atomic_dec(&(l)->a)
#define local_add(i,l)	atomic_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_sub((i),(&(l)->a))

/* Non-atomic variants, i.e. for use when preemption is already
 * disabled and the counter won't be touched from interrupt context.
 * Some archs can optimize this case well. */
#define __local_inc(l)		local_set((l), local_read(l) + 1)
#define __local_dec(l)		local_set((l), local_read(l) - 1)
#define __local_add(i,l)	local_set((l), local_read(l) + (i))
#define __local_sub(i,l)	local_set((l), local_read(l) - (i))

#else /* BITS_PER_LONG != 32: a 32-bit atomic_t can't hold a long. */

/* Implement in terms of three variables.
   Another option would be to use local_irq_save/restore. */

typedef struct
{
	/* 0 = in hardirq, 1 = in softirq, 2 = usermode. */
	unsigned long v[3];
} local_t;
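
/*
 * Slot selection for the current context: in a hardirq both in_irq()
 * and in_interrupt() are true, giving index !1 + !1 == 0; in a softirq
 * only in_interrupt() is true, giving !1 + !0 == 1; in process context
 * neither is true, giving !0 + !0 == 2.
 */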
#define _LOCAL_VAR(l)	((l)->v[!in_interrupt() + !in_irq()])

#define LOCAL_INIT(i)	{ { (i), 0, 0 } }
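
/*
 * The logical value is the sum of the three slots: each context only
 * ever modifies its own slot, so concurrent updates from process,
 * softirq and hardirq context cannot lose each other's counts.
 */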
static inline unsigned long local_read(local_t *l)
{
	return l->v[0] + l->v[1] + l->v[2];
}

static inline void local_set(local_t *l, unsigned long v)
{
	l->v[0] = v;
	l->v[1] = l->v[2] = 0;
}

static inline void local_inc(local_t *l)
{
	preempt_disable();
	_LOCAL_VAR(l)++;
	preempt_enable();
}

static inline void local_dec(local_t *l)
{
	preempt_disable();
	_LOCAL_VAR(l)--;
	preempt_enable();
}

static inline void local_add(unsigned long v, local_t *l)
{
	preempt_disable();
	_LOCAL_VAR(l) += v;
	preempt_enable();
}

static inline void local_sub(unsigned long v, local_t *l)
{
	preempt_disable();
	_LOCAL_VAR(l) -= v;
	preempt_enable();
}

/* Non-atomic variants, i.e. for use when preemption is already
 * disabled and the counter won't be touched from interrupt context.
 * Some archs can optimize this case well. */
#define __local_inc(l)		((l)->v[0]++)
#define __local_dec(l)		((l)->v[0]--)
#define __local_add(i,l)	((l)->v[0] += (i))
#define __local_sub(i,l)	((l)->v[0] -= (i))

#endif /* Non-atomic implementation */
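
/*
 * A sketch of the __local_* variants (hypothetical helper, not part of
 * this header): valid only because preemption is off around the update
 * and the counter is never touched from interrupt context, so the
 * plain read-modify-write cannot be interleaved.
 */
#if 0	/* illustrative only, not compiled */
static DEFINE_PER_CPU(local_t, fastpath_count) = LOCAL_INIT(0);

static void note_fastpath(unsigned long units)
{
	preempt_disable();
	__local_add(units, &__get_cpu_var(fastpath_count));
	preempt_enable();
}
#endif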

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than the naive implementations above.  Note they
 * take a variable (eg. mystruct.foo), not an address.
 */
#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
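
/*
 * The per-CPU counter from the sketch above, rewritten with the
 * cpu_local_* wrappers (note: they take the variable, not an address).
 * A machine-wide total sums every CPU's copy; for_each_cpu() and
 * per_cpu() are the per-cpu iteration helpers of this kernel era.
 */
#if 0	/* illustrative only, not compiled */
static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

static void count_hit(void)
{
	cpu_local_inc(hits);	/* expands to local_inc(&__get_cpu_var(hits)) */
}

static unsigned long total_hits(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_cpu(cpu)
		sum += local_read(&per_cpu(hits, cpu));
	return sum;
}
#endif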

/* Non-atomic increments, i.e. for use when preemption is already
 * disabled and the counter won't be touched from interrupt context.
 * Some archs can optimize this case well.
 */
#define __cpu_local_inc(v)	__local_inc(&__get_cpu_var(v))
#define __cpu_local_dec(v)	__local_dec(&__get_cpu_var(v))
#define __cpu_local_add(i, v)	__local_add((i), &__get_cpu_var(v))
#define __cpu_local_sub(i, v)	__local_sub((i), &__get_cpu_var(v))
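
/*
 * Sketch of the __cpu_local_* forms (hypothetical): in a path that
 * already runs with preemption disabled and is never entered from
 * interrupt context, they avoid the atomic operation (32-bit) or the
 * preempt_disable()/preempt_enable() pair (generic 64-bit) that
 * local_inc() would otherwise pay.
 */
#if 0	/* illustrative only, not compiled */
static DEFINE_PER_CPU(local_t, refill_count) = LOCAL_INIT(0);

/* Caller guarantees preemption is already disabled. */
static void note_refill(void)
{
	__cpu_local_inc(refill_count);
}
#endif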

#endif /* _ASM_GENERIC_LOCAL_H */