ext4: pass allocation_request struct to ext4_(alloc,splice)_branch

Instead of initializing the allocation_request structure in
ext4_alloc_branch(), set it up in ext4_ind_map_blocks(), and then pass
it to ext4_alloc_branch() and ext4_splice_branch().

This allows ext4_ind_map_blocks to pass flags in the allocation
request structure without having to add Yet Another argument to
ext4_alloc_branch().

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
---
 fs/ext4/indirect.c | 82 +++++++++++++++++++++++++++++----------------------------------
 1 file changed, 38 insertions(+), 44 deletions(-)
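
For reference, the resulting call pattern in ext4_ind_map_blocks() looks
roughly like the sketch below (assembled from the hunks that follow;
error handling and surrounding code are omitted, so this is illustrative
only):

        struct ext4_allocation_request ar;

        /* the caller now sets up the allocation request once ... */
        memset(&ar, 0, sizeof(ar));
        ar.inode = inode;
        ar.logical = map->m_lblk;
        if (S_ISREG(inode->i_mode))
                ar.flags = EXT4_MB_HINT_DATA;   /* further hint flags can be OR'ed in here */
        ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
        ar.len = ext4_blks_to_allocate(partial, indirect_blks,
                                       map->m_len, blocks_to_boundary);

        /* ... and hands the whole struct to the callees */
        err = ext4_alloc_branch(handle, &ar, indirect_blks,
                                offsets + (partial - chain), partial);
        if (!err)
                err = ext4_splice_branch(handle, &ar, partial, indirect_blks);

New allocation hints can then be passed by setting additional fields or
flags in the request before these calls, instead of widening the
function signatures.
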
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index e75f840..69af0cd 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -318,34 +318,22 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
  *     ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
  *     as described above and return 0.
  */
-static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
-                            ext4_lblk_t iblock, int indirect_blks,
-                            int *blks, ext4_fsblk_t goal,
-                            ext4_lblk_t *offsets, Indirect *branch)
+static int ext4_alloc_branch(handle_t *handle,
+                            struct ext4_allocation_request *ar,
+                            int indirect_blks, ext4_lblk_t *offsets,
+                            Indirect *branch)
 {
-       struct ext4_allocation_request  ar;
        struct buffer_head *            bh;
        ext4_fsblk_t                    b, new_blocks[4];
        __le32                          *p;
        int                             i, j, err, len = 1;

-       /*
-        * Set up for the direct block allocation
-        */
-       memset(&ar, 0, sizeof(ar));
-       ar.inode = inode;
-       ar.len = *blks;
-       ar.logical = iblock;
-       if (S_ISREG(inode->i_mode))
-               ar.flags = EXT4_MB_HINT_DATA;
-
        for (i = 0; i <= indirect_blks; i++) {
                if (i == indirect_blks) {
-                       ar.goal = goal;
-                       new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
+                       new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
                } else
-                       goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode,
-                                                       goal, 0, NULL, &err);
+                       ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
+                                   ar->inode, ar->goal, 0, NULL, &err);
                if (err) {
                        i--;
                        goto failed;
@@ -354,7 +342,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                if (i == 0)
                        continue;

-               bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
+               bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
                if (unlikely(!bh)) {
                        err = -ENOMEM;
                        goto failed;
@@ -372,7 +360,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                b = new_blocks[i];

                if (i == indirect_blks)
-                       len = ar.len;
+                       len = ar->len;
                for (j = 0; j < len; j++)
                        *p++ = cpu_to_le32(b++);

@@ -381,11 +369,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                unlock_buffer(bh);

                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-               err = ext4_handle_dirty_metadata(handle, inode, bh);
+               err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
                if (err)
                        goto failed;
        }
-       *blks = ar.len;
        return 0;
 failed:
        for (; i >= 0; i--) {
@@ -396,10 +383,10 @@ failed:
                 * existing before ext4_alloc_branch() was called.
                 */
                if (i > 0 && i != indirect_blks && branch[i].bh)
-                       ext4_forget(handle, 1, inode, branch[i].bh,
+                       ext4_forget(handle, 1, ar->inode, branch[i].bh,
                                    branch[i].bh->b_blocknr);
-               ext4_free_blocks(handle, inode, NULL, new_blocks[i],
-                                (i == indirect_blks) ? ar.len : 1, 0);
+               ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
+                                (i == indirect_blks) ? ar->len : 1, 0);
        }
        return err;
 }
@@ -419,9 +406,9 @@ failed:
  * inode (->i_blocks, etc.). In case of success we end up with the full
  * chain to new block and return 0.
  */
-static int ext4_splice_branch(handle_t *handle, struct inode *inode,
-                             ext4_lblk_t block, Indirect *where, int num,
-                             int blks)
+static int ext4_splice_branch(handle_t *handle,
+                             struct ext4_allocation_request *ar,
+                             Indirect *where, int num)
 {
        int i;
        int err = 0;
@@ -446,9 +433,9 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
         * Update the host buffer_head or inode to point to more just allocated
         * direct blocks blocks
         */
-       if (num == 0 && blks > 1) {
+       if (num == 0 && ar->len > 1) {
                current_block = le32_to_cpu(where->key) + 1;
-               for (i = 1; i < blks; i++)
+               for (i = 1; i < ar->len; i++)
                        *(where->p + i) = cpu_to_le32(current_block++);
        }

@@ -465,14 +452,14 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
                 */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
-               err = ext4_handle_dirty_metadata(handle, inode, where->bh);
+               err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
                if (err)
                        goto err_out;
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
                 */
-               ext4_mark_inode_dirty(handle, inode);
+               ext4_mark_inode_dirty(handle, ar->inode);
                jbd_debug(5, "splicing direct\n");
        }
        return err;
@@ -484,11 +471,11 @@ err_out:
                 * need to revoke the block, which is why we don't
                 * need to set EXT4_FREE_BLOCKS_METADATA.
                 */
-               ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
+               ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
                                 EXT4_FREE_BLOCKS_FORGET);
        }
-       ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
-                        blks, 0);
+       ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
+                        ar->len, 0);

        return err;
 }
@@ -525,11 +512,11 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                        struct ext4_map_blocks *map,
                        int flags)
 {
+       struct ext4_allocation_request ar;
        int err = -EIO;
        ext4_lblk_t offsets[4];
        Indirect chain[4];
        Indirect *partial;
-       ext4_fsblk_t goal;
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
@@ -579,7 +566,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                return -ENOSPC;
        }

-       goal = ext4_find_goal(inode, map->m_lblk, partial);
+       /* Set up for the direct block allocation */
+       memset(&ar, 0, sizeof(ar));
+       ar.inode = inode;
+       ar.logical = map->m_lblk;
+       if (S_ISREG(inode->i_mode))
+               ar.flags = EXT4_MB_HINT_DATA;
+
+       ar.goal = ext4_find_goal(inode, map->m_lblk, partial);

        /* the number of blocks need to allocate for [d,t]indirect blocks */
        indirect_blks = (chain + depth) - partial - 1;
@@ -588,13 +582,13 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
         * Next look up the indirect map to count the totoal number of
         * direct blocks to allocate for this branch.
         */
-       count = ext4_blks_to_allocate(partial, indirect_blks,
-                                     map->m_len, blocks_to_boundary);
+       ar.len = ext4_blks_to_allocate(partial, indirect_blks,
+                                      map->m_len, blocks_to_boundary);
+
        /*
         * Block out ext4_truncate while we alter the tree
         */
-       err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
-                               &count, goal,
+       err = ext4_alloc_branch(handle, &ar, indirect_blks,
                                offsets + (partial - chain), partial);

        /*
@@ -605,14 +599,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
         * may need to return -EAGAIN upwards in the worst case.  --sct
         */
        if (!err)
-               err = ext4_splice_branch(handle, inode, map->m_lblk,
-                                        partial, indirect_blks, count);
+               err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
        if (err)
                goto cleanup;

        map->m_flags |= EXT4_MAP_NEW;

        ext4_update_inode_fsync_trans(handle, inode, 1);
+       count = ar.len;
 got_it:
        map->m_flags |= EXT4_MAP_MAPPED;
        map->m_pblk = le32_to_cpu(chain[depth-1].key);