/*
 * lib/percpu_counter.c
 *
 * Fast batching percpu counters.
 */
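/*
 * Each counter keeps a central s64 count plus a small signed per-CPU
 * delta.  Writers update only their local delta; once the delta reaches
 * +/-batch it is folded into the central count under fbc->lock.  The
 * cheap reader (percpu_counter_read()) therefore sees a value that can
 * be off by at most (nr_cpus * batch), while the update fast path
 * touches no shared cachelines.
 */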
#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
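/*
 * On hotplug-capable kernels every live counter is linked onto
 * percpu_counters (under percpu_counters_lock) so the CPU-hotplug
 * callback at the bottom of this file can find it and drain a dead
 * CPU's deltas.
 */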
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
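/*
 * Set the counter to @amount: zero every per-CPU delta and install the
 * new central value, all under fbc->lock so concurrent folds from
 * __percpu_counter_add() cannot be lost.
 */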
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;

        spin_lock(&fbc->lock);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
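/*
 * Add @amount to the counter.  The update normally stays in this CPU's
 * delta; only when the delta would reach +/-@batch is it flushed into
 * fbc->count under the lock.  get_cpu() disables preemption so the
 * per-CPU slot cannot change under us.
 */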
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;
        s32 *pcount;
        int cpu = get_cpu();

        pcount = per_cpu_ptr(fbc->counters, cpu);
        count = *pcount + amount;
        if (count >= batch || count <= -batch) {
                spin_lock(&fbc->lock);
                fbc->count += count;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        } else {
                *pcount = count;
        }
        put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;

        spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        spin_unlock(&fbc->lock);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
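/*
 * Initialise the central count, allocate the per-CPU deltas, and (on
 * hotplug-capable kernels) register the counter on the global list.
 * The @key argument lets the percpu_counter_init() wrapper give each
 * counter's spinlock its own lockdep class.
 */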
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                          struct lock_class_key *key)
{
        spin_lock_init(&fbc->lock);
        lockdep_set_class(&fbc->lock, key);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
        if (!fbc->counters)
                return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
        INIT_LIST_HEAD(&fbc->list);
        mutex_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        mutex_unlock(&percpu_counters_lock);
#endif
        return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
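/*
 * Tear down a counter: unhook it from the global list and free the
 * per-CPU storage.  Safe to call on a counter whose init failed,
 * since fbc->counters is then NULL.
 */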
void percpu_counter_destroy(struct percpu_counter *fbc)
{
        if (!fbc->counters)
                return;

#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        mutex_unlock(&percpu_counters_lock);
#endif
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
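/*
 * Typical usage, as a minimal sketch ("nr_things" is a made-up example;
 * real callers also check the return value of percpu_counter_init()):
 *
 *      static struct percpu_counter nr_things;
 *
 *      percpu_counter_init(&nr_things, 0);
 *      percpu_counter_inc(&nr_things);          (fast: local delta only)
 *      total = percpu_counter_sum(&nr_things);  (slow: walks every CPU)
 *      percpu_counter_destroy(&nr_things);
 */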
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
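/*
 * Pick the fold threshold: at least 32, scaled up to twice the number
 * of online CPUs so that larger machines take the shared lock less
 * often, at the cost of a proportionally larger read error.
 */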
static void compute_batch_value(void)
{
        int nr = num_online_cpus();

        percpu_counter_batch = max(32, nr*2);
}
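/*
 * On any hotplug event, recompute the batch for the new CPU count.
 * When a CPU dies, additionally fold its leftover deltas back into
 * each counter so the dead CPU's contribution is not lost.
 */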
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unsigned int cpu;
        struct percpu_counter *fbc;

        compute_batch_value();
        if (action != CPU_DEAD)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        mutex_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                spin_unlock_irqrestore(&fbc->lock, flags);
        }
        mutex_unlock(&percpu_counters_lock);
#endif
        return NOTIFY_OK;
}
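/* Establish the initial batch and hook up the hotplug notifier at boot. */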
static int __init percpu_counter_startup(void)
{
        compute_batch_value();
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);