/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/module.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a contiguous block range that can be
 * represented by a single extent.
 */
struct list_blocks_struct {
        ext4_lblk_t first_block, last_block;
        ext4_fsblk_t first_pblock, last_pblock;
};
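
/*
 * finish_range() flushes the block range accumulated in *lb as a single
 * extent into the temporary (extent-format) inode, extending or restarting
 * the journal as needed so that enough credits are available for the insert.
 */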
static int finish_range(handle_t *handle, struct inode *inode,
                                struct list_blocks_struct *lb)
{
        int retval = 0, needed;
        struct ext4_extent newext;
        struct ext4_ext_path *path;

        if (lb->first_pblock == 0)
                return 0;

        /* Add the extent to the temp inode */
        newext.ee_block = cpu_to_le32(lb->first_block);
        newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
        ext4_ext_store_pblock(&newext, lb->first_pblock);
        path = ext4_ext_find_extent(inode, lb->first_block, NULL);

        if (IS_ERR(path)) {
                retval = PTR_ERR(path);
                path = NULL;
                goto err_out;
        }

        /*
         * Calculate the credits needed to insert this extent.
         * Since we are doing this in a loop we may accumulate extra
         * credits. But below we try not to accumulate too many
         * of them by restarting the journal.
         */
        needed = ext4_ext_calc_credits_for_single_extent(inode,
                        lb->last_block - lb->first_block + 1, path);

        /*
         * Make sure the credits we accumulated are not really high.
         */
        if (needed && handle->h_buffer_credits >= EXT4_RESERVE_TRANS_BLOCKS) {
                retval = ext4_journal_restart(handle, needed);
                if (retval)
                        goto err_out;
        } else if (needed) {
                retval = ext4_journal_extend(handle, needed);
                if (retval) {
                        /*
                         * If not able to extend the journal, restart it.
                         */
                        retval = ext4_journal_restart(handle, needed);
                        if (retval)
                                goto err_out;
                }
        }

        retval = ext4_ext_insert_extent(handle, inode, path, &newext);
err_out:
        if (path) {
                ext4_ext_drop_refs(path);
                kfree(path);
        }
        lb->first_pblock = 0;
        return retval;
}
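
/*
 * update_extent_range() adds one logical/physical block pair to the range
 * being collected in *lb.  If the block is contiguous with the current
 * range it simply extends the range; otherwise the current range is written
 * out via finish_range() and a new range is started.
 */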
static int update_extent_range(handle_t *handle, struct inode *inode,
                               ext4_fsblk_t pblock, ext4_lblk_t blk_num,
                               struct list_blocks_struct *lb)
{
        int retval;
        /*
         * See if we can add on to the existing range (if it exists)
         */
        if (lb->first_pblock &&
                (lb->last_pblock+1 == pblock) &&
                (lb->last_block+1 == blk_num)) {
                lb->last_pblock = pblock;
                lb->last_block = blk_num;
                return 0;
        }
        /*
         * Start a new range.
         */
        retval = finish_range(handle, inode, lb);
        lb->first_pblock = lb->last_pblock = pblock;
        lb->first_block = lb->last_block = blk_num;

        return retval;
}
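
/*
 * Walk one indirect block: feed every allocated entry into the extent
 * tree of the temporary inode and advance *blk_nump past the logical
 * blocks covered by this indirect block.
 */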
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
                                   ext4_fsblk_t pblock, ext4_lblk_t *blk_nump,
                                   struct list_blocks_struct *lb)
{
        struct buffer_head *bh;
        __le32 *i_data;
        int i, retval = 0;
        ext4_lblk_t blk_count = *blk_nump;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        if (!pblock) {
                /* Only update the file block number */
                *blk_nump += max_entries;
                return 0;
        }

        bh = sb_bread(inode->i_sb, pblock);
        if (!bh)
                return -EIO;

        i_data = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++, blk_count++) {
                if (i_data[i]) {
                        retval = update_extent_range(handle, inode,
                                                le32_to_cpu(i_data[i]),
                                                blk_count, lb);
                        if (retval)
                                break;
                }
        }

        /* Update the file block number */
        *blk_nump = blk_count;
        put_bh(bh);
        return retval;
}
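
/*
 * Walk a double indirect block by handing each allocated entry to
 * update_ind_extent_range(); holes only advance the logical block count.
 */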
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
                                    ext4_fsblk_t pblock, ext4_lblk_t *blk_nump,
                                    struct list_blocks_struct *lb)
{
        struct buffer_head *bh;
        __le32 *i_data;
        int i, retval = 0;
        ext4_lblk_t blk_count = *blk_nump;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        if (!pblock) {
                /* Only update the file block number */
                *blk_nump += max_entries * max_entries;
                return 0;
        }

        bh = sb_bread(inode->i_sb, pblock);
        if (!bh)
                return -EIO;

        i_data = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (i_data[i]) {
                        retval = update_ind_extent_range(handle, inode,
                                                le32_to_cpu(i_data[i]),
                                                &blk_count, lb);
                        if (retval)
                                break;
                } else {
                        /* Only update the file block number */
                        blk_count += max_entries;
                }
        }

        /* Update the file block number */
        *blk_nump = blk_count;
        put_bh(bh);
        return retval;
}
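
/*
 * Walk a triple indirect block by handing each allocated entry to
 * update_dind_extent_range(); holes only advance the logical block count.
 */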
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
                                    ext4_fsblk_t pblock, ext4_lblk_t *blk_nump,
                                    struct list_blocks_struct *lb)
{
        struct buffer_head *bh;
        __le32 *i_data;
        int i, retval = 0;
        ext4_lblk_t blk_count = *blk_nump;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        if (!pblock) {
                /* Only update the file block number */
                *blk_nump += max_entries * max_entries * max_entries;
                return 0;
        }

        bh = sb_bread(inode->i_sb, pblock);
        if (!bh)
                return -EIO;

        i_data = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (i_data[i]) {
                        retval = update_dind_extent_range(handle, inode,
                                                le32_to_cpu(i_data[i]),
                                                &blk_count, lb);
                        if (retval)
                                break;
                } else {
                        /* Only update the file block number */
                        blk_count += max_entries * max_entries;
                }
        }

        /* Update the file block number */
        *blk_nump = blk_count;
        put_bh(bh);
        return retval;
}
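
/*
 * Make sure the running transaction has enough credits left for freeing
 * one block, plus the quota updates that may come with it.
 */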
static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
        int retval = 0, needed;

        if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
                return 0;
        /*
         * We are freeing blocks. During this we touch the
         * superblock, the group descriptor and the block bitmap,
         * so allocate a credit of 3. We may also update the
         * quota (user and group).
         */
        needed = 3 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);

        if (ext4_journal_extend(handle, needed) != 0)
                retval = ext4_journal_restart(handle, needed);

        return retval;
}
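
/*
 * Free each single indirect block referenced from a double indirect block
 * of the original inode, then free the double indirect block itself.  Only
 * metadata is freed here; the data blocks stay allocated, since they are
 * now referenced from the extent tree.
 */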
static int free_dind_blocks(handle_t *handle,
                            struct inode *inode, __le32 i_data)
{
        int i;
        __le32 *tmp_idata;
        struct buffer_head *bh;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
        if (!bh)
                return -EIO;

        tmp_idata = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (tmp_idata[i]) {
                        extend_credit_for_blkdel(handle, inode);
                        ext4_free_blocks(handle, inode,
                                        le32_to_cpu(tmp_idata[i]), 1, 1);
                }
        }
        put_bh(bh);
        extend_credit_for_blkdel(handle, inode);
        ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1);
        return 0;
}
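
/*
 * Free the metadata hanging off a triple indirect block: each referenced
 * double indirect tree is released via free_dind_blocks(), then the triple
 * indirect block itself is freed.
 */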
static int free_tind_blocks(handle_t *handle,
                            struct inode *inode, __le32 i_data)
{
        int i, retval = 0;
        __le32 *tmp_idata;
        struct buffer_head *bh;
        unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

        bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
        if (!bh)
                return -EIO;

        tmp_idata = (__le32 *)bh->b_data;
        for (i = 0; i < max_entries; i++) {
                if (tmp_idata[i]) {
                        retval = free_dind_blocks(handle,
                                        inode, tmp_idata[i]);
                        if (retval) {
                                put_bh(bh);
                                return retval;
                        }
                }
        }
        put_bh(bh);
        extend_credit_for_blkdel(handle, inode);
        ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1);
        return 0;
}
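
/*
 * Free the indirect, double indirect and triple indirect metadata blocks
 * recorded in the saved i_data[] of the original inode, once its data
 * blocks are reachable through the new extent tree.
 */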
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
        int retval;

        /* ei->i_data[EXT4_IND_BLOCK] */
        if (i_data[0]) {
                extend_credit_for_blkdel(handle, inode);
                ext4_free_blocks(handle, inode,
                                le32_to_cpu(i_data[0]), 1, 1);
        }

        /* ei->i_data[EXT4_DIND_BLOCK] */
        if (i_data[1]) {
                retval = free_dind_blocks(handle, inode, i_data[1]);
                if (retval)
                        return retval;
        }

        /* ei->i_data[EXT4_TIND_BLOCK] */
        if (i_data[2]) {
                retval = free_tind_blocks(handle, inode, i_data[2]);
                if (retval)
                        return retval;
        }
        return 0;
}
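
/*
 * Switch the original inode over to the extent format built in tmp_inode:
 * copy the i_data area across under i_data_sem, fold tmp_inode's block
 * count into the original inode and release the old indirect metadata.
 * The migration is aborted if a block allocation raced with us and cleared
 * EXT4_EXT_MIGRATE.
 */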
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
                                    struct inode *tmp_inode)
{
        int retval;
        __le32 i_data[3];
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

        /*
         * One credit accounted for writing the
         * i_data field of the original inode.
         */
        retval = ext4_journal_extend(handle, 1);
        if (retval) {
                retval = ext4_journal_restart(handle, 1);
                if (retval)
                        goto err_out;
        }

        i_data[0] = ei->i_data[EXT4_IND_BLOCK];
        i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
        i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

        down_write(&EXT4_I(inode)->i_data_sem);
        /*
         * If EXT4_EXT_MIGRATE is cleared, a block allocation
         * happened after we started the migrate. We need to
         * fail the migrate.
         */
        if (!(EXT4_I(inode)->i_flags & EXT4_EXT_MIGRATE)) {
                retval = -EAGAIN;
                up_write(&EXT4_I(inode)->i_data_sem);
                goto err_out;
        } else
                EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
                                                        ~EXT4_EXT_MIGRATE;
        /*
         * We have the extent map built with the tmp inode.
         * Now copy the i_data across.
         */
        ei->i_flags |= EXT4_EXTENTS_FL;
        memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

        /*
         * Update i_blocks with the new blocks that got
         * allocated while adding extents for extent index
         * blocks.
         *
         * While converting to extents we need not
         * update the original inode i_blocks for extent blocks
         * via quota APIs. The quota update happened via tmp_inode already.
         */
        spin_lock(&inode->i_lock);
        inode->i_blocks += tmp_inode->i_blocks;
        spin_unlock(&inode->i_lock);
        up_write(&EXT4_I(inode)->i_data_sem);

        /*
         * We mark the inode dirty afterwards, because we decrement the
         * i_blocks when freeing the indirect meta-data blocks.
         */
        retval = free_ind_block(handle, inode, i_data);
        ext4_mark_inode_dirty(handle, inode);

err_out:
        return retval;
}
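
/*
 * Recursively free the extent index blocks below *ix.  This is used on the
 * failure paths of ext4_ext_migrate(), where the extent metadata of the
 * tmp_inode has to be dropped without touching the data blocks.
 */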
static int free_ext_idx(handle_t *handle, struct inode *inode,
                        struct ext4_extent_idx *ix)
{
        int i, retval = 0;
        ext4_fsblk_t block;
        struct buffer_head *bh;
        struct ext4_extent_header *eh;

        block = idx_pblock(ix);
        bh = sb_bread(inode->i_sb, block);
        if (!bh)
                return -EIO;

        eh = (struct ext4_extent_header *)bh->b_data;
        if (eh->eh_depth != 0) {
                ix = EXT_FIRST_INDEX(eh);
                for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
                        retval = free_ext_idx(handle, inode, ix);
                        if (retval)
                                break;
                }
        }
        put_bh(bh);
        extend_credit_for_blkdel(handle, inode);
        ext4_free_blocks(handle, inode, block, 1, 1);
        return retval;
}
/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
        int i, retval = 0;
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
        struct ext4_extent_idx *ix;

        if (eh->eh_depth == 0)
                /*
                 * No extra blocks allocated for extent meta data
                 */
                return 0;
        ix = EXT_FIRST_INDEX(eh);
        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
                retval = free_ext_idx(handle, inode, ix);
                if (retval)
                        return retval;
        }
        return retval;
}
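
/*
 * ext4_ext_migrate() converts an indirect-block-mapped (ext3-style) inode
 * to the ext4 extent format:
 *
 *  1. Create a temporary extent-format inode and put it on the orphan list.
 *  2. Walk the direct, indirect, double indirect and triple indirect blocks
 *     of the original inode and rebuild the same logical-to-physical mapping
 *     as extents in the temporary inode.
 *  3. Swap the i_data of the two inodes under i_data_sem, free the old
 *     indirect metadata and drop the temporary inode.
 *
 * It is reached through the EXT4_IOC_MIGRATE ioctl, which is why it takes
 * the otherwise unused filp/cmd/arg ioctl-style arguments.  A minimal
 * userspace sketch, assuming headers that define EXT4_IOC_MIGRATE, would be:
 *
 *      int fd = open("/mnt/ext4/file", O_RDONLY);
 *      if (ioctl(fd, EXT4_IOC_MIGRATE) < 0)
 *              perror("EXT4_IOC_MIGRATE");
 */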
int ext4_ext_migrate(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg)
{
        handle_t *handle;
        int retval = 0, i;
        __le32 *i_data;
        ext4_lblk_t blk_count = 0;
        struct ext4_inode_info *ei;
        struct inode *tmp_inode = NULL;
        struct list_blocks_struct lb;
        unsigned long max_entries;

        if (!test_opt(inode->i_sb, EXTENTS))
                /*
                 * If mounted with noextents we don't allow the migrate
                 */
                return -EINVAL;

        if ((EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
                return -EINVAL;

        if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
                /*
                 * Don't migrate fast symlinks
                 */
                return retval;

        handle = ext4_journal_start(inode,
                                EXT4_DATA_TRANS_BLOCKS(inode->i_sb) +
                                EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
                                2 * EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)
                                + 1);
        if (IS_ERR(handle)) {
                retval = PTR_ERR(handle);
                goto err_out;
        }
        tmp_inode = ext4_new_inode(handle,
                                inode->i_sb->s_root->d_inode,
                                S_IFREG);
        if (IS_ERR(tmp_inode)) {
                retval = -ENOMEM;
                ext4_journal_stop(handle);
                tmp_inode = NULL;
                goto err_out;
        }
        i_size_write(tmp_inode, i_size_read(inode));
        /*
         * We don't want the inode to be reclaimed
         * if we got interrupted in between. We have
         * this tmp inode carrying a reference to the
         * data blocks of the original file. We set
         * the i_nlink to zero at the last stage after
         * switching the original file to extent format.
         */
        tmp_inode->i_nlink = 1;

        ext4_ext_tree_init(handle, tmp_inode);
        ext4_orphan_add(handle, tmp_inode);
        ext4_journal_stop(handle);

        /*
         * Start with one credit accounted for
         * the superblock modification.
         *
         * For the tmp_inode we have already committed the
         * transaction that created the inode. Later, as and
         * when we add extents, we extend the journal.
         */
        /*
         * i_mutex prevents write and truncate on the file. Reads still go
         * through. We take i_data_sem in ext4_ext_swap_inode_data before we
         * switch the inode format to prevent reads.
         */
        mutex_lock(&(inode->i_mutex));
        /*
         * Even though we take i_mutex we can still cause block allocation
         * via mmap write to holes. If we have allocated new blocks we fail
         * the migrate. New block allocation will clear the EXT4_EXT_MIGRATE
         * flag. The flag is updated with i_data_sem held to prevent racing
         * with block allocation.
         */
        down_read((&EXT4_I(inode)->i_data_sem));
        EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags | EXT4_EXT_MIGRATE;
        up_read((&EXT4_I(inode)->i_data_sem));

        handle = ext4_journal_start(inode, 1);

        ei = EXT4_I(inode);
        i_data = ei->i_data;
        memset(&lb, 0, sizeof(lb));

        /* 32 bit block address, 4 bytes */
        max_entries = inode->i_sb->s_blocksize >> 2;
        for (i = 0; i < EXT4_NDIR_BLOCKS; i++, blk_count++) {
                if (i_data[i]) {
                        retval = update_extent_range(handle, tmp_inode,
                                                le32_to_cpu(i_data[i]),
                                                blk_count, &lb);
                        if (retval)
                                goto err_out;
                }
        }
        if (i_data[EXT4_IND_BLOCK]) {
                retval = update_ind_extent_range(handle, tmp_inode,
                                le32_to_cpu(i_data[EXT4_IND_BLOCK]),
                                &blk_count, &lb);
                if (retval)
                        goto err_out;
        } else
                blk_count += max_entries;
        if (i_data[EXT4_DIND_BLOCK]) {
                retval = update_dind_extent_range(handle, tmp_inode,
                                le32_to_cpu(i_data[EXT4_DIND_BLOCK]),
                                &blk_count, &lb);
                if (retval)
                        goto err_out;
        } else
                blk_count += max_entries * max_entries;
        if (i_data[EXT4_TIND_BLOCK]) {
                retval = update_tind_extent_range(handle, tmp_inode,
                                le32_to_cpu(i_data[EXT4_TIND_BLOCK]),
                                &blk_count, &lb);
                if (retval)
                        goto err_out;
        }
        /*
         * Build the last extent
         */
        retval = finish_range(handle, tmp_inode, &lb);
err_out:
        if (retval)
                /*
                 * Failure case: delete the extent information with the
                 * tmp_inode
                 */
                free_ext_block(handle, tmp_inode);
        else {
                retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
                if (retval)
                        /*
                         * If we fail to swap inode data, free the extent
                         * details of the tmp inode
                         */
                        free_ext_block(handle, tmp_inode);
        }

        /* We mark the tmp_inode dirty via ext4_ext_tree_init. */
        if (ext4_journal_extend(handle, 1) != 0)
                ext4_journal_restart(handle, 1);

        /*
         * Mark the tmp_inode as of size zero
         */
        i_size_write(tmp_inode, 0);

        /*
         * Set the i_blocks count to zero
         * so that ext4_delete_inode does the
         * right job.
         *
         * We don't need to take the i_lock because
         * the inode is not visible to user space.
         */
        tmp_inode->i_blocks = 0;

        /* Reset the extent details */
        ext4_ext_tree_init(handle, tmp_inode);

        /*
         * Set the i_nlink to zero so that
         * generic_drop_inode really deletes the
         * inode
         */
        tmp_inode->i_nlink = 0;

        ext4_journal_stop(handle);
        mutex_unlock(&(inode->i_mutex));

        if (tmp_inode)
                iput(tmp_inode);

        return retval;
}