percpu counter: clean up percpu_counter_sum_and_set()

From: Mingming Cao <cmm@us.ibm.com>

percpu_counter_sum_and_set() and percpu_counter_sum() are the same, except
that the former updates the global counter after accounting.  Since we are
taking fbc->lock to calculate the precise value of the counter in
percpu_counter_sum() anyway, it should simply set fbc->count too, as
percpu_counter_sum_and_set() does.

This patch merges the two interfaces into one.
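
For reference, this is roughly what the merged helper looks like once the
patch is applied (a simplified sketch based on the 2.6.27-era
lib/percpu_counter.c): the sum now unconditionally folds the per-cpu deltas
back into fbc->count while fbc->lock is held.

s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;	/* fold in this cpu's pending delta */
		*pcount = 0;	/* and clear it, unconditionally now */
	}
	fbc->count = ret;	/* the old "and_set" behaviour, always on */
	spin_unlock(&fbc->lock);
	return ret;
}
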
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: <linux-ext4@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
---
 fs/ext4/balloc.c               |    2 +-
 include/linux/percpu_counter.h |   12 +++---------
 lib/percpu_counter.c           |    8 +++-----
 3 files changed, 7 insertions(+), 15 deletions(-)

Index: linux-2.6.27-rc3/fs/ext4/balloc.c
===================================================================
--- linux-2.6.27-rc3.orig/fs/ext4/balloc.c	2008-08-20 17:25:35.000000000 -0700
+++ linux-2.6.27-rc3/fs/ext4/balloc.c	2008-08-21 14:34:32.000000000 -0700
@@ -1624,7 +1624,7 @@
 #ifdef CONFIG_SMP
 	if (free_blocks - root_blocks < FBC_BATCH)
 		free_blocks =
-			percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
+			percpu_counter_sum(&sbi->s_freeblocks_counter);
 #endif
 	if (free_blocks <= root_blocks)
 		/* we don't have free space */
Index: linux-2.6.27-rc3/include/linux/percpu_counter.h
===================================================================
--- linux-2.6.27-rc3.orig/include/linux/percpu_counter.h	2008-08-20 17:25:35.000000000 -0700
+++ linux-2.6.27-rc3/include/linux/percpu_counter.h	2008-08-21 14:34:32.000000000 -0700
@@ -35,7 +35,7 @@
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
+s64 __percpu_counter_sum(struct percpu_counter *fbc);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -44,19 +44,13 @@
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-	s64 ret = __percpu_counter_sum(fbc, 0);
+	s64 ret = __percpu_counter_sum(fbc);
 	return ret < 0 ? 0 : ret;
 }
 
-static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
-{
-	return __percpu_counter_sum(fbc, 1);
-}
-
-
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-	return __percpu_counter_sum(fbc, 0);
+	return __percpu_counter_sum(fbc);
 }
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
Index: linux-2.6.27-rc3/lib/percpu_counter.c
===================================================================
--- linux-2.6.27-rc3.orig/lib/percpu_counter.c	2008-08-20 17:25:35.000000000 -0700
+++ linux-2.6.27-rc3/lib/percpu_counter.c	2008-08-21 14:34:32.000000000 -0700
@@ -52,7 +52,7 @@
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
@@ -62,11 +62,9 @@
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		if (set)
-			*pcount = 0;
+		*pcount = 0;
 	}
-	if (set)
-		fbc->count = ret;
+	fbc->count = ret;
 
 	spin_unlock(&fbc->lock);
 	return ret;