ext4: call ext4_map_blocks() directly from readpage.c

Use ext4_map_blocks() directly instead of going through
ext4_get_block().  This allows us to drop a lot of generic code that
is not needed for ext4.
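
The heart of the change is the mapping step in do_mpage_readpage().
Condensed from the hunk below (error handling and the surrounding
loop elided), the new call pattern is:

	struct ext4_map_blocks map;
	int ret;

	map.m_lblk = block_in_file;
	map.m_len = last_block - block_in_file;
	ret = ext4_map_blocks(NULL, inode, &map, 0);
	if (ret < 0)
		goto confused;
	map_bh->b_blocknr = map.m_pblk;
	map_bh->b_bdev = bdev;
	map_bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	map_bh->b_state = map.m_flags;

Passing a NULL handle runs the lookup outside any journal
transaction, and a flags argument of 0 requests a plain lookup with
no block allocation, which is all the read path needs.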

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
---
 fs/ext4/readpage.c | 83 ++++++++++++++++++-----------------------------------------------------------------
 1 file changed, 18 insertions(+), 65 deletions(-)

diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 3b29da1..ce3ecc1 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -85,49 +85,6 @@ mpage_alloc(struct block_device *bdev,
 }
 
 /*
- * support function for mpage_readpages.  The fs supplied get_block might
- * return an up to date buffer.  This is used to map that buffer into
- * the page, which allows readpage to avoid triggering a duplicate call
- * to get_block.
- *
- * The idea is to avoid adding buffers to pages that don't already have
- * them.  So when the buffer is up to date and the page size == block size,
- * this marks the page up to date instead of adding new buffers.
- */
-static void
-map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
-{
-       struct inode *inode = page->mapping->host;
-       struct buffer_head *page_bh, *head;
-       int block = 0;
-
-       if (!page_has_buffers(page)) {
-               /*
-                * don't make any buffers if there is only one buffer on
-                * the page and the page just needs to be set up to date
-                */
-               if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
-                   buffer_uptodate(bh)) {
-                       SetPageUptodate(page);
-                       return;
-               }
-               create_empty_buffers(page, 1 << inode->i_blkbits, 0);
-       }
-       head = page_buffers(page);
-       page_bh = head;
-       do {
-               if (block == page_block) {
-                       page_bh->b_state = bh->b_state;
-                       page_bh->b_bdev = bh->b_bdev;
-                       page_bh->b_blocknr = bh->b_blocknr;
-                       break;
-               }
-               page_bh = page_bh->b_this_page;
-               block++;
-       } while (page_bh != head);
-}
-
-/*
  * This is the worker routine which does all the work of mapping the disk
  * blocks and constructs largest possible bios, submits them for IO if the
  * blocks are not contiguous on the disk.
@@ -138,8 +95,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
  */
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
-               sector_t *last_block_in_bio, struct buffer_head *map_bh,
-               unsigned long *first_logical_block, get_block_t get_block)
+                 sector_t *last_block_in_bio, struct buffer_head *map_bh,
+                 unsigned long *first_logical_block)
 {
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
@@ -151,7 +108,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        unsigned first_hole = blocks_per_page;
-       struct block_device *bdev = NULL;
+       struct block_device *bdev = inode->i_sb->s_bdev;
        int length;
        int fully_mapped = 1;
        unsigned nblocks;
@@ -188,7 +145,6 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                        page_block++;
                        block_in_file++;
                }
-               bdev = map_bh->b_bdev;
        }
 
        /*
@@ -200,9 +156,19 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                map_bh->b_size = 0;
 
                if (block_in_file < last_block) {
-                       map_bh->b_size = (last_block-block_in_file) << blkbits;
-                       if (get_block(inode, block_in_file, map_bh, 0))
+                       struct ext4_map_blocks map;
+                       int ret;
+
+                       map.m_lblk = block_in_file;
+                       map.m_len = last_block - block_in_file;
+                       ret = ext4_map_blocks(NULL, inode, &map, 0);
+                       if (ret < 0)
                                goto confused;
+                       map_bh->b_blocknr = map.m_pblk;
+                       map_bh->b_bdev = bdev;
+                       map_bh->b_size = inode->i_sb->s_blocksize * map.m_len;
+                       map_bh->b_state = map.m_flags;
+
                        *first_logical_block = block_in_file;
                }
@@ -215,17 +181,6 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                        continue;
                }
 
-               /* some filesystems will copy data into the page during
-                * the get_block call, in which case we don't want to
-                * read it again.  map_buffer_to_page copies the data
-                * we just collected from get_block into the page's buffers
-                * so readpage doesn't have to repeat the get_block call
-                */
-               if (buffer_uptodate(map_bh)) {
-                       map_buffer_to_page(page, map_bh, page_block);
-                       goto confused;
-               }
-
                if (first_hole != blocks_per_page)
                        goto confused;          /* hole -> non-hole */
 
@@ -243,7 +198,6 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                        page_block++;
                        block_in_file++;
                }
-               bdev = map_bh->b_bdev;
        }
 
        if (first_hole != blocks_per_page) {
@@ -303,7 +257,7 @@ confused:
        if (bio)
                bio = mpage_bio_submit(READ, bio);
        if (!PageUptodate(page))
-               block_read_full_page(page, get_block);
+               block_read_full_page(page, ext4_get_block);
        else
                unlock_page(page);
        goto out;
@@ -329,7 +283,7 @@ int ext4_readpage(struct file *file, struct page *page)
        map_bh.b_state = 0;
        map_bh.b_size = 0;
        bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-                       &map_bh, &first_logical_block, ext4_get_block);
+                       &map_bh, &first_logical_block);
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
@@ -361,8 +315,7 @@ int ext4_readpages(struct file *file, struct address_space *mapping,
                        bio = do_mpage_readpage(bio, page,
                                        nr_pages - page_idx,
                                        &last_block_in_bio, &map_bh,
-                                       &first_logical_block,
-                                       ext4_get_block);
+                                       &first_logical_block);
                }
                page_cache_release(page);
        }