#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
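/*
 * Minimal usage sketch (the counter name below is hypothetical, for
 * illustration only):
 *
 *	struct percpu_counter nr_widgets;
 *
 *	err = percpu_counter_init(&nr_widgets, 0); /* -ENOMEM on failure */
 *	percpu_counter_inc(&nr_widgets);
 *	...
 *	pr_info("approx widgets: %lld\n",
 *		(long long)percpu_counter_read(&nr_widgets));
 *	percpu_counter_destroy(&nr_widgets);
 */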
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP
struct percpu_counter {
	spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 *counters;
};

extern int percpu_counter_batch;
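/*
 * Note on batching (behaviour of lib/percpu_counter.c, summarized here
 * for orientation): each CPU accumulates deltas in its local s32
 * counter and only folds them into the spinlock-protected fbc->count
 * once their magnitude reaches the batch threshold, so the common-case
 * update touches no shared cacheline.  percpu_counter_batch is the
 * default threshold, scaled with the number of online CPUs.
 */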
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})
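/*
 * percpu_counter_init() is a macro rather than a function so that every
 * call site gets its own static lock_class_key; lockdep can then tell
 * counters initialized at different places apart instead of treating
 * all percpu_counter locks as a single class.
 */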
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
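/*
 * Callers updating a very hot counter can pass __percpu_counter_add()
 * a larger batch than the default, trading read accuracy for fewer
 * acquisitions of fbc->lock.  A sketch (the counter name and batch
 * multiplier below are made up for illustration):
 *
 *	__percpu_counter_add(&nr_widgets, 1, 4 * percpu_counter_batch);
 */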
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for some counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 1;
}
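/*
 * Why a read can briefly go negative: fbc->count only holds the folded
 * batches, so decrements can reach it before the matching increments
 * still pending in other CPUs' local deltas.  For example, with a true
 * total of +1 (+3 pending on one CPU), folding a -2 batch from another
 * CPU leaves fbc->count at -2 until the +3 is folded in.
 */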
#else

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

#define __percpu_counter_add(fbc, amount, batch) \
	percpu_counter_add(fbc, amount)
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

#endif	/* CONFIG_SMP */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */