Blackfin: pm: drop irq save/restore in standby and suspend to mem callback
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / include / linux / percpu_counter.h
blob46f6ba56fa9139909c04acbad98383b7ed88babb
1 #ifndef _LINUX_PERCPU_COUNTER_H
2 #define _LINUX_PERCPU_COUNTER_H
3 /*
4 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
6 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
7 */
9 #include <linux/spinlock.h>
10 #include <linux/smp.h>
11 #include <linux/list.h>
12 #include <linux/threads.h>
13 #include <linux/percpu.h>
14 #include <linux/types.h>
16 #ifdef CONFIG_SMP
18 struct percpu_counter {
19 spinlock_t lock;
20 s64 count;
21 #ifdef CONFIG_HOTPLUG_CPU
22 struct list_head list; /* All percpu_counters are on a list */
23 #endif
24 s32 __percpu *counters;
27 extern int percpu_counter_batch;
29 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
30 struct lock_class_key *key);
32 #define percpu_counter_init(fbc, value) \
33 ({ \
34 static struct lock_class_key __key; \
36 __percpu_counter_init(fbc, value, &__key); \
39 void percpu_counter_destroy(struct percpu_counter *fbc);
40 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
41 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
42 s64 __percpu_counter_sum(struct percpu_counter *fbc);
43 int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
45 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
47 __percpu_counter_add(fbc, amount, percpu_counter_batch);
50 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
52 s64 ret = __percpu_counter_sum(fbc);
53 return ret < 0 ? 0 : ret;
56 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
58 return __percpu_counter_sum(fbc);
61 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
63 return fbc->count;
67 * It is possible for the percpu_counter_read() to return a small negative
68 * number for some counter which should never be negative.
71 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
73 s64 ret = fbc->count;
75 barrier(); /* Prevent reloads of fbc->count */
76 if (ret >= 0)
77 return ret;
78 return 1;
81 static inline int percpu_counter_initialized(struct percpu_counter *fbc)
83 return (fbc->counters != NULL);
86 #else
88 struct percpu_counter {
89 s64 count;
92 static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
94 fbc->count = amount;
95 return 0;
/* UP destroy: no per-cpu storage to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
102 static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
104 fbc->count = amount;
107 static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
109 if (fbc->count > rhs)
110 return 1;
111 else if (fbc->count < rhs)
112 return -1;
113 else
114 return 0;
117 static inline void
118 percpu_counter_add(struct percpu_counter *fbc, s64 amount)
120 preempt_disable();
121 fbc->count += amount;
122 preempt_enable();
125 static inline void
126 __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
128 percpu_counter_add(fbc, amount);
131 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
133 return fbc->count;
136 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
138 return fbc->count;
141 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
143 return percpu_counter_read_positive(fbc);
146 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
148 return percpu_counter_read(fbc);
/* UP counters need no allocation, so they are always "initialized". */
static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}
156 #endif /* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
168 static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
170 percpu_counter_add(fbc, -amount);
173 #endif /* _LINUX_PERCPU_COUNTER_H */