Move ext4-use-percpu-data-for-lg_prealloc_list to stable series
[ext4-patch-queue.git] / ext4-use-percpu-data-for-lg_prealloc_list
blob 8ddebb7f6054f70e34b142d7ae6f314d6629aac7
1 ext4: use percpu data structures for lg_prealloc_list
3 From: Eric Sandeen <sandeen@redhat.com>
5 lg_prealloc_list seems to cry out for a per-cpu data structure; on a large
6 smp system I think this should be better.  I've lightly tested this change
7 on a 4-cpu system.
9 Signed-off-by: Eric Sandeen <sandeen@redhat.com>
10 Acked-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
11 Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
12 ---
14  mballoc.c |   13 +++++--------
15  1 file changed, 5 insertions(+), 8 deletions(-)
17 Index: linux-2.6/fs/ext4/mballoc.c
18 ===================================================================
19 --- linux-2.6.orig/fs/ext4/mballoc.c    2008-08-04 15:30:30.000000000 -0500
20 +++ linux-2.6/fs/ext4/mballoc.c 2008-08-13 13:48:33.224165751 -0500
21 @@ -2540,17 +2540,16 @@ int ext4_mb_init(struct super_block *sb,
22         sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
23         sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
25 -       i = sizeof(struct ext4_locality_group) * nr_cpu_ids;
26 -       sbi->s_locality_groups = kmalloc(i, GFP_KERNEL);
27 +       sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
28         if (sbi->s_locality_groups == NULL) {
29                 clear_opt(sbi->s_mount_opt, MBALLOC);
30                 kfree(sbi->s_mb_offsets);
31                 kfree(sbi->s_mb_maxs);
32                 return -ENOMEM;
33         }
34 -       for (i = 0; i < nr_cpu_ids; i++) {
35 +       for_each_possible_cpu(i) {
36                 struct ext4_locality_group *lg;
37 -               lg = &sbi->s_locality_groups[i];
38 +               lg = per_cpu_ptr(sbi->s_locality_groups, i);
39                 mutex_init(&lg->lg_mutex);
40                 for (j = 0; j < PREALLOC_TB_SIZE; j++)
41                         INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
42 @@ -2647,8 +2646,7 @@ int ext4_mb_release(struct super_block *
43                                 atomic_read(&sbi->s_mb_discarded));
44         }
46 -       kfree(sbi->s_locality_groups);
48 +       free_percpu(sbi->s_locality_groups);
49         ext4_mb_history_release(sb);
50         ext4_mb_destroy_per_dev_proc(sb);
52 @@ -4055,8 +4053,7 @@ static void ext4_mb_group_or_file(struct
53          * per cpu locality group is to reduce the contention between block
54          * request from multiple CPUs.
55          */
56 -       ac->ac_lg = &sbi->s_locality_groups[get_cpu()];
57 -       put_cpu();
58 +       ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
60         /* we're going to use group allocation */
61         ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;