1 #ifndef _LINUX_PERCPU_COUNTER_H
2 #define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
9 #include <linux/spinlock.h>
10 #include <linux/smp.h>
11 #include <linux/list.h>
12 #include <linux/threads.h>
13 #include <linux/percpu.h>
14 #include <linux/types.h>
18 struct percpu_counter
{
21 #ifdef CONFIG_HOTPLUG_CPU
22 struct list_head list
; /* All percpu_counters are on a list */
24 s32 __percpu
*counters
;
27 extern int percpu_counter_batch
;
29 int __percpu_counter_init(struct percpu_counter
*fbc
, s64 amount
,
30 struct lock_class_key
*key
);
32 #define percpu_counter_init(fbc, value) \
34 static struct lock_class_key __key; \
36 __percpu_counter_init(fbc, value, &__key); \
39 void percpu_counter_destroy(struct percpu_counter
*fbc
);
40 void percpu_counter_set(struct percpu_counter
*fbc
, s64 amount
);
41 void __percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
, s32 batch
);
42 s64
__percpu_counter_sum(struct percpu_counter
*fbc
);
43 int percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
);
45 static inline void percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
)
47 __percpu_counter_add(fbc
, amount
, percpu_counter_batch
);
50 static inline s64
percpu_counter_sum_positive(struct percpu_counter
*fbc
)
52 s64 ret
= __percpu_counter_sum(fbc
);
53 return ret
< 0 ? 0 : ret
;
56 static inline s64
percpu_counter_sum(struct percpu_counter
*fbc
)
58 return __percpu_counter_sum(fbc
);
61 static inline s64
percpu_counter_read(struct percpu_counter
*fbc
)
67 * It is possible for the percpu_counter_read() to return a small negative
68 * number for some counter which should never be negative.
71 static inline s64
percpu_counter_read_positive(struct percpu_counter
*fbc
)
75 barrier(); /* Prevent reloads of fbc->count */
81 static inline int percpu_counter_initialized(struct percpu_counter
*fbc
)
83 return (fbc
->counters
!= NULL
);
88 struct percpu_counter
{
92 static inline int percpu_counter_init(struct percpu_counter
*fbc
, s64 amount
)
/* UP destroy: nothing was allocated, so nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
102 static inline void percpu_counter_set(struct percpu_counter
*fbc
, s64 amount
)
107 static inline int percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
)
109 if (fbc
->count
> rhs
)
111 else if (fbc
->count
< rhs
)
118 percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
)
121 fbc
->count
+= amount
;
126 __percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
, s32 batch
)
128 percpu_counter_add(fbc
, amount
);
131 static inline s64
percpu_counter_read(struct percpu_counter
*fbc
)
137 * percpu_counter is intended to track positive numbers. In the UP case the
138 * number should never be negative.
140 static inline s64
percpu_counter_read_positive(struct percpu_counter
*fbc
)
145 static inline s64
percpu_counter_sum_positive(struct percpu_counter
*fbc
)
147 return percpu_counter_read_positive(fbc
);
150 static inline s64
percpu_counter_sum(struct percpu_counter
*fbc
)
152 return percpu_counter_read(fbc
);
155 static inline int percpu_counter_initialized(struct percpu_counter
*fbc
)
160 #endif /* CONFIG_SMP */
/* Increment by one (common to SMP and UP flavours). */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
/* Decrement by one (common to SMP and UP flavours). */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
172 static inline void percpu_counter_sub(struct percpu_counter
*fbc
, s64 amount
)
174 percpu_counter_add(fbc
, -amount
);
177 #endif /* _LINUX_PERCPU_COUNTER_H */