include/asm-generic/local.h
#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H

#include <linux/config.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/atomic.h>
#include <asm/types.h>
/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which uses atomic_long_t.  Which is
 * rather pointless.  The whole point behind local_t is that some processors
 * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
 * running on this CPU.  local_t allows exploitation of such capabilities.
 */
/* Implement in terms of atomics. */

/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct
{
	atomic_long_t a;
} local_t;
#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set((&(l)->a),(i))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)
#define local_add(i,l)	atomic_long_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))
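/*
 * Illustrative sketch (not part of the original header; all names below
 * are hypothetical): a local_t counter bumped from interrupt context and
 * read from process context.  On architectures with a real local_t
 * implementation, local_inc() is atomic wrt IRQs on this CPU, so the
 * writer side needs no lock:
 *
 *	static local_t frob_events = LOCAL_INIT(0);
 *
 *	static void frob_note_event(void)
 *	{
 *		local_inc(&frob_events);
 *	}
 *
 *	long frob_event_count(void)
 *	{
 *		return local_read(&frob_events);
 *	}
 */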
/* Non-atomic variants, ie. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well. */
#define __local_inc(l)		local_set((l), local_read(l) + 1)
#define __local_dec(l)		local_set((l), local_read(l) - 1)
#define __local_add(i,l)	local_set((l), local_read(l) + (i))
#define __local_sub(i,l)	local_set((l), local_read(l) - (i))
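/*
 * Illustrative sketch (not part of the original header; names are
 * hypothetical): the __local_* forms are a plain read-modify-write, so
 * they are only safe when the counter is never touched from interrupt
 * context and preemption is already off, e.g. under get_cpu()/put_cpu():
 *
 *	static DEFINE_PER_CPU(local_t, frob_count);
 *
 *	void frob_bump(void)
 *	{
 *		int cpu = get_cpu();
 *		__local_inc(&per_cpu(frob_count, cpu));
 *		put_cpu();
 *	}
 */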
/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable (eg. mystruct.foo), not an address.
 */
#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
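/*
 * Illustrative sketch (not part of the original header; names are
 * hypothetical): the cpu_local_* wrappers take the per-cpu variable
 * itself, not a pointer, and resolve this CPU's copy via __get_cpu_var():
 *
 *	static DEFINE_PER_CPU(local_t, frob_stats);
 *
 *	cpu_local_inc(frob_stats);
 *	cpu_local_add(16, frob_stats);
 *	total += cpu_local_read(frob_stats);
 */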
/* Non-atomic increments, ie. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well.
 */
#define __cpu_local_inc(v)	__local_inc(&__get_cpu_var(v))
#define __cpu_local_dec(v)	__local_dec(&__get_cpu_var(v))
#define __cpu_local_add(i, v)	__local_add((i), &__get_cpu_var(v))
#define __cpu_local_sub(i, v)	__local_sub((i), &__get_cpu_var(v))
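/*
 * Illustrative sketch (not part of the original header; names are
 * hypothetical): the __cpu_local_* forms suit code that already runs
 * with preemption disabled and where the counter is only ever touched
 * from one context, e.g. a softirq handler:
 *
 *	static DEFINE_PER_CPU(local_t, frob_softirq_runs);
 *
 *	static void frob_do_softirq(struct softirq_action *a)
 *	{
 *		__cpu_local_inc(frob_softirq_runs);
 *	}
 */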
#endif	/* _ASM_GENERIC_LOCAL_H */