1 ext4: pass allocation_request struct to ext4_(alloc,splice)_branch
3 Instead of initializing the allocation_request structure in
4 ext4_alloc_branch(), set it up in ext4_ind_map_blocks(), and then pass
5 it to ext4_alloc_branch() and ext4_splice_branch().
7 This allows ext4_ind_map_blocks to pass flags in the allocation
8 request structure without having to add Yet Another argument to
9 ext4_alloc_branch().
11 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
13 fs/ext4/indirect.c | 82 +++++++++++++++++++++++++++++----------------------------------
14 1 file changed, 38 insertions(+), 44 deletions(-)
16 diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
17 index e75f840..69af0cd 100644
18 --- a/fs/ext4/indirect.c
19 +++ b/fs/ext4/indirect.c
20 @@ -318,34 +318,22 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
21 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
22 * as described above and return 0.
24 -static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
25 - ext4_lblk_t iblock, int indirect_blks,
26 - int *blks, ext4_fsblk_t goal,
27 - ext4_lblk_t *offsets, Indirect *branch)
28 +static int ext4_alloc_branch(handle_t *handle,
29 + struct ext4_allocation_request *ar,
30 + int indirect_blks, ext4_lblk_t *offsets,
33 - struct ext4_allocation_request ar;
34 struct buffer_head * bh;
35 ext4_fsblk_t b, new_blocks[4];
37 int i, j, err, len = 1;
40 - * Set up for the direct block allocation
42 - memset(&ar, 0, sizeof(ar));
45 - ar.logical = iblock;
46 - if (S_ISREG(inode->i_mode))
47 - ar.flags = EXT4_MB_HINT_DATA;
49 for (i = 0; i <= indirect_blks; i++) {
50 if (i == indirect_blks) {
52 - new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
53 + new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
55 - goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode,
56 - goal, 0, NULL, &err);
57 + ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
58 + ar->inode, ar->goal, 0, NULL, &err);
62 @@ -354,7 +342,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
66 - bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
67 + bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
71 @@ -372,7 +360,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
74 if (i == indirect_blks)
77 for (j = 0; j < len; j++)
78 *p++ = cpu_to_le32(b++);
80 @@ -381,11 +369,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
83 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
84 - err = ext4_handle_dirty_metadata(handle, inode, bh);
85 + err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
93 @@ -396,10 +383,10 @@ failed:
94 * existing before ext4_alloc_branch() was called.
96 if (i > 0 && i != indirect_blks && branch[i].bh)
97 - ext4_forget(handle, 1, inode, branch[i].bh,
98 + ext4_forget(handle, 1, ar->inode, branch[i].bh,
99 branch[i].bh->b_blocknr);
100 - ext4_free_blocks(handle, inode, NULL, new_blocks[i],
101 - (i == indirect_blks) ? ar.len : 1, 0);
102 + ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
103 + (i == indirect_blks) ? ar->len : 1, 0);
107 @@ -419,9 +406,9 @@ failed:
108 * inode (->i_blocks, etc.). In case of success we end up with the full
109 * chain to new block and return 0.
111 -static int ext4_splice_branch(handle_t *handle, struct inode *inode,
112 - ext4_lblk_t block, Indirect *where, int num,
114 +static int ext4_splice_branch(handle_t *handle,
115 + struct ext4_allocation_request *ar,
116 + Indirect *where, int num)
120 @@ -446,9 +433,9 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
121 * Update the host buffer_head or inode to point to more just allocated
122 * direct blocks blocks
124 - if (num == 0 && blks > 1) {
125 + if (num == 0 && ar->len > 1) {
126 current_block = le32_to_cpu(where->key) + 1;
127 - for (i = 1; i < blks; i++)
128 + for (i = 1; i < ar->len; i++)
129 *(where->p + i) = cpu_to_le32(current_block++);
132 @@ -465,14 +452,14 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
134 jbd_debug(5, "splicing indirect only\n");
135 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
136 - err = ext4_handle_dirty_metadata(handle, inode, where->bh);
137 + err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
142 * OK, we spliced it into the inode itself on a direct block.
144 - ext4_mark_inode_dirty(handle, inode);
145 + ext4_mark_inode_dirty(handle, ar->inode);
146 jbd_debug(5, "splicing direct\n");
149 @@ -484,11 +471,11 @@ err_out:
150 * need to revoke the block, which is why we don't
151 * need to set EXT4_FREE_BLOCKS_METADATA.
153 - ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
154 + ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
155 EXT4_FREE_BLOCKS_FORGET);
157 - ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
159 + ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
164 @@ -525,11 +512,11 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
165 struct ext4_map_blocks *map,
168 + struct ext4_allocation_request ar;
170 ext4_lblk_t offsets[4];
175 int blocks_to_boundary = 0;
177 @@ -579,7 +566,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
181 - goal = ext4_find_goal(inode, map->m_lblk, partial);
182 + /* Set up for the direct block allocation */
183 + memset(&ar, 0, sizeof(ar));
185 + ar.logical = map->m_lblk;
186 + if (S_ISREG(inode->i_mode))
187 + ar.flags = EXT4_MB_HINT_DATA;
189 + ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
191 /* the number of blocks need to allocate for [d,t]indirect blocks */
192 indirect_blks = (chain + depth) - partial - 1;
193 @@ -588,13 +582,13 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
194 * Next look up the indirect map to count the totoal number of
195 * direct blocks to allocate for this branch.
197 - count = ext4_blks_to_allocate(partial, indirect_blks,
198 - map->m_len, blocks_to_boundary);
199 + ar.len = ext4_blks_to_allocate(partial, indirect_blks,
200 + map->m_len, blocks_to_boundary);
203 * Block out ext4_truncate while we alter the tree
205 - err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
207 + err = ext4_alloc_branch(handle, &ar, indirect_blks,
208 offsets + (partial - chain), partial);
211 @@ -605,14 +599,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
212 * may need to return -EAGAIN upwards in the worst case. --sct
215 - err = ext4_splice_branch(handle, inode, map->m_lblk,
216 - partial, indirect_blks, count);
217 + err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
221 map->m_flags |= EXT4_MAP_NEW;
223 ext4_update_inode_fsync_trans(handle, inode, 1);
226 map->m_flags |= EXT4_MAP_MAPPED;
227 map->m_pblk = le32_to_cpu(chain[depth-1].key);