/*
 * Source provenance (gitweb capture):
 *   badpage: ratelimit print_bad_pte and bad_page
 *   [linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / include / linux / percpu_counter.h
 *   blob 9007ccdfc1127cfe73db03e31dd82a843f8f4fa8
 */
1 #ifndef _LINUX_PERCPU_COUNTER_H
2 #define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
9 #include <linux/spinlock.h>
10 #include <linux/smp.h>
11 #include <linux/list.h>
12 #include <linux/threads.h>
13 #include <linux/percpu.h>
14 #include <linux/types.h>
16 #ifdef CONFIG_SMP
18 struct percpu_counter {
19 spinlock_t lock;
20 s64 count;
21 #ifdef CONFIG_HOTPLUG_CPU
22 struct list_head list; /* All percpu_counters are on a list */
23 #endif
24 s32 *counters;
27 #if NR_CPUS >= 16
28 #define FBC_BATCH (NR_CPUS*2)
29 #else
30 #define FBC_BATCH (NR_CPUS*4)
31 #endif
33 int percpu_counter_init(struct percpu_counter *fbc, s64 amount);
34 int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
35 void percpu_counter_destroy(struct percpu_counter *fbc);
36 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
37 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
38 s64 __percpu_counter_sum(struct percpu_counter *fbc);
40 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
42 __percpu_counter_add(fbc, amount, FBC_BATCH);
45 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
47 s64 ret = __percpu_counter_sum(fbc);
48 return ret < 0 ? 0 : ret;
51 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
53 return __percpu_counter_sum(fbc);
56 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
58 return fbc->count;
62 * It is possible for the percpu_counter_read() to return a small negative
63 * number for some counter which should never be negative.
66 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
68 s64 ret = fbc->count;
70 barrier(); /* Prevent reloads of fbc->count */
71 if (ret >= 0)
72 return ret;
73 return 1;
76 #else
78 struct percpu_counter {
79 s64 count;
82 static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
84 fbc->count = amount;
85 return 0;
88 #define percpu_counter_init_irq percpu_counter_init
/* Nothing to free on UP. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
94 static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
96 fbc->count = amount;
99 #define __percpu_counter_add(fbc, amount, batch) \
100 percpu_counter_add(fbc, amount)
102 static inline void
103 percpu_counter_add(struct percpu_counter *fbc, s64 amount)
105 preempt_disable();
106 fbc->count += amount;
107 preempt_enable();
110 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
112 return fbc->count;
115 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
117 return fbc->count;
120 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
122 return percpu_counter_read_positive(fbc);
125 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
127 return percpu_counter_read(fbc);
130 #endif /* CONFIG_SMP */
/* Convenience wrapper: increment by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
/* Convenience wrapper: decrement by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
142 static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
144 percpu_counter_add(fbc, -amount);
147 #endif /* _LINUX_PERCPU_COUNTER_H */