percpu counter: clean up percpu_counter_sum_and_set()

From: Mingming Cao <cmm@us.ibm.com>

percpu_counter_sum_and_set() and percpu_counter_sum() are the same except
that the former updates the global counter after accounting.  Since we
take fbc->lock to calculate the precise value of the counter in
percpu_counter_sum() anyway, it should simply set fbc->count too, as
percpu_counter_sum_and_set() does.

This patch merges the two interfaces into one.

Signed-off-by: Mingming Cao <cmm@us.ibm.com>
---
 fs/ext4/balloc.c               |    2 +-
 include/linux/percpu_counter.h |   12 +++---------
 lib/percpu_counter.c           |    8 +++-----
 3 files changed, 7 insertions(+), 15 deletions(-)
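For quick reference, this is a sketch of the merged function as it reads
once the lib/percpu_counter.c hunks below are applied.  It is reconstructed
from the diff plus the unchanged context between the two hunks (not shown
here), so treat it as illustrative rather than authoritative:

s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
		*pcount = 0;	/* fold the per-cpu delta into the sum */
	}
	fbc->count = ret;	/* always publish the precise value now */
	spin_unlock(&fbc->lock);
	return ret;
}

Note that every caller of percpu_counter_sum() now also zeroes the per-cpu
counters and refreshes fbc->count; before this patch that behaviour was
opt-in via the "set" argument.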
Index: linux-2.6.27-rc3/fs/ext4/balloc.c
===================================================================
--- linux-2.6.27-rc3.orig/fs/ext4/balloc.c	2008-08-20 17:25:35.000000000 -0700
+++ linux-2.6.27-rc3/fs/ext4/balloc.c	2008-08-21 14:34:32.000000000 -0700
@@ -1624,7 +1624,7 @@
 #ifdef CONFIG_SMP
 	if (free_blocks - root_blocks < FBC_BATCH)
 		free_blocks =
-			percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
+			percpu_counter_sum(&sbi->s_freeblocks_counter);
 #endif
 	if (free_blocks <= root_blocks)
 		/* we don't have free space */
Index: linux-2.6.27-rc3/include/linux/percpu_counter.h
===================================================================
--- linux-2.6.27-rc3.orig/include/linux/percpu_counter.h	2008-08-20 17:25:35.000000000 -0700
+++ linux-2.6.27-rc3/include/linux/percpu_counter.h	2008-08-21 14:34:32.000000000 -0700
@@ -35,7 +35,7 @@
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
+s64 __percpu_counter_sum(struct percpu_counter *fbc);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -44,19 +44,13 @@
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-	s64 ret = __percpu_counter_sum(fbc, 0);
+	s64 ret = __percpu_counter_sum(fbc);
 	return ret < 0 ? 0 : ret;
 }
 
-static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
-{
-	return __percpu_counter_sum(fbc, 1);
-}
-
-
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-	return __percpu_counter_sum(fbc, 0);
+	return __percpu_counter_sum(fbc);
 }
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
Index: linux-2.6.27-rc3/lib/percpu_counter.c
===================================================================
--- linux-2.6.27-rc3.orig/lib/percpu_counter.c	2008-08-20 17:25:35.000000000 -0700
+++ linux-2.6.27-rc3/lib/percpu_counter.c	2008-08-21 14:34:32.000000000 -0700
@@ -52,7 +52,7 @@
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
@@ -62,11 +62,9 @@
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		if (set)
-			*pcount = 0;
+		*pcount = 0;
 	}
-	if (set)
-		fbc->count = ret;
+	fbc->count = ret;
 
 	spin_unlock(&fbc->lock);
 	return ret;