Update delalloc ENOSPC patch comments for clarity
[ext4-patch-queue.git] / ext4-use-percpu-data-for-lg_prealloc_list
blob: add66d532d5628f97bcb58e7e6e5aabb3768e53b
1 ext4: use percpu data structures for lg_prealloc_list
3 From: Eric Sandeen <sandeen@redhat.com>
5 lg_prealloc_list seems to cry out for a per-cpu data structure; on a large
6 SMP system I think this should perform better.  I've lightly tested this
7 change on a 4-cpu system.
9 Signed-off-by: Eric Sandeen <sandeen@redhat.com>
10 Acked-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
11 ---
13  mballoc.c |   13 +++++--------
14  1 file changed, 5 insertions(+), 8 deletions(-)
16 Index: linux-2.6/fs/ext4/mballoc.c
17 ===================================================================
18 --- linux-2.6.orig/fs/ext4/mballoc.c    2008-08-04 15:30:30.000000000 -0500
19 +++ linux-2.6/fs/ext4/mballoc.c 2008-08-13 13:48:33.224165751 -0500
20 @@ -2540,17 +2540,16 @@ int ext4_mb_init(struct super_block *sb,
21         sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
22         sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
24 -       i = sizeof(struct ext4_locality_group) * nr_cpu_ids;
25 -       sbi->s_locality_groups = kmalloc(i, GFP_KERNEL);
26 +       sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
27         if (sbi->s_locality_groups == NULL) {
28                 clear_opt(sbi->s_mount_opt, MBALLOC);
29                 kfree(sbi->s_mb_offsets);
30                 kfree(sbi->s_mb_maxs);
31                 return -ENOMEM;
32         }
33 -       for (i = 0; i < nr_cpu_ids; i++) {
34 +       for_each_possible_cpu(i) {
35                 struct ext4_locality_group *lg;
36 -               lg = &sbi->s_locality_groups[i];
37 +               lg = per_cpu_ptr(sbi->s_locality_groups, i);
38                 mutex_init(&lg->lg_mutex);
39                 for (j = 0; j < PREALLOC_TB_SIZE; j++)
40                         INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
41 @@ -2647,8 +2646,7 @@ int ext4_mb_release(struct super_block *
42                                 atomic_read(&sbi->s_mb_discarded));
43         }
45 -       kfree(sbi->s_locality_groups);
47 +       free_percpu(sbi->s_locality_groups);
48         ext4_mb_history_release(sb);
49         ext4_mb_destroy_per_dev_proc(sb);
51 @@ -4055,8 +4053,7 @@ static void ext4_mb_group_or_file(struct
52          * per cpu locality group is to reduce the contention between block
53          * request from multiple CPUs.
54          */
55 -       ac->ac_lg = &sbi->s_locality_groups[get_cpu()];
56 -       put_cpu();
57 +       ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
59         /* we're going to use group allocation */
60         ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;