/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
				  ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}

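/*
 * Worked example, assuming a non-bigalloc filesystem (s_cluster_bits
 * == 0) with 4KiB blocks, i.e. 32768 blocks per group, and
 * s_first_data_block == 0:
 *
 *	ext4_group_t grp;
 *	ext4_grpblk_t off;
 *
 *	ext4_get_group_no_and_offset(sb, 40000, &grp, &off);
 *
 * yields grp == 1 and off == 7232, since 40000 == 1 * 32768 + 7232.
 */
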
static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			       ext4_group_t block_group)
{
	ext4_group_t actual_group;

	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
				    ext4_group_t block_group,
				    struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

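/*
 * Worked example, assuming an illustrative non-bigalloc group 0 that
 * starts with the superblock, one descriptor block and 256 reserved
 * GDT blocks, followed contiguously by the two bitmaps and a 512-block
 * inode table:
 *
 *	base metadata			num_clusters = 258
 *	block bitmap  (block 258)	num_clusters = 259
 *	inode bitmap  (block 259)	num_clusters = 260
 *	inode table   (blocks 260-771)	num_clusters = 772
 *
 * Each piece lands exactly at num_clusters, so only the "increment the
 * base" case fires and no cluster needs to be tracked explicitly.
 */
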
static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

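/*
 * Example, assuming 4KiB blocks (32768 per group) and
 * s_first_data_block == 0: a 100000-block filesystem has four groups,
 * and the last one (group 3) spans only 100000 - 3 * 32768 == 1696
 * blocks, hence 1696 clusters instead of the full 32768.
 */
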
/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
			    ext4_group_t block_group,
			    struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
		return;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * blocksize * 8 (which is the size of the bitmap), set the rest
	 * of the block bitmap to 1.
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
}

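/*
 * For a 4KiB block size the bitmap spans s_blocksize * 8 == 32768 bits.
 * If the (last) group contains fewer clusters than that, say the 1696
 * from the example above, ext4_mark_bitmap_end() sets bits 1696..32767,
 * so clusters beyond the end of the device can never appear free.
 */
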
/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap.
 */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

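/*
 * Roughly, a block group that carries a superblock backup is laid out
 * as follows (non-flex_bg case):
 *
 *	superblock | group descriptors | reserved GDT blocks |
 *	block bitmap | inode bitmap | inode table | data blocks
 *
 * Groups without a backup start directly at the block bitmap.  With
 * flex_bg, the bitmaps and inode table may live in a different group.
 */
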
/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

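/*
 * Typical lookup, e.g. to read a group's free cluster count; bh may be
 * passed as NULL when the caller does not need the descriptor's
 * buffer_head:
 *
 *	struct ext4_group_desc *gdp;
 *
 *	gdp = ext4_get_group_desc(sb, group, NULL);
 *	if (gdp)
 *		free = ext4_free_group_clusters(sb, gdp);
 */
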
/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    unsigned int block_group,
					    struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all
		 * so the bitmap validation will be skipped for those groups
		 * or it has to also read the block group where the bitmaps
		 * are located to verify they are set.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
			offset + EXT4_SB(sb)->s_itb_per_group,
			offset);
	if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

void ext4_validate_block_bitmap(struct super_block *sb,
				struct ext4_group_desc *desc,
				unsigned int block_group,
				struct buffer_head *bh)
{
	ext4_fsblk_t blk;

	if (buffer_verified(bh))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
						    desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode-table blocks are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	return bh;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	return 0;
}

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}

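/*
 * Splitting the read into a nowait half and a wait half lets callers
 * batch bitmap I/O instead of blocking on each group in turn.  A
 * sketch of the pattern:
 *
 *	for (i = 0; i < n; i++)
 *		bh[i] = ext4_read_block_bitmap_nowait(sb, first + i);
 *	for (i = 0; i < n; i++)
 *		if (bh[i] && ext4_wait_block_bitmap(sb, first + i, bh[i]))
 *			bh[i] = NULL;
 */
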
/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, root_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);

	/*
	 * r_blocks_count should always be a multiple of the cluster
	 * ratio, so we are safe to do a plain bit shift only.
	 */
	root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;

	if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
				EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= ((root_clusters + nclusters) + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

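/*
 * The percpu counters trade accuracy for speed: the cheap
 * percpu_counter_read_positive() above may be stale by up to the
 * counter batch size on every CPU.  EXT4_FREECLUSTERS_WATERMARK is
 * sized to cover that worst-case drift, so the expensive exact
 * percpu_counter_sum_positive() is only paid for when the estimate
 * says we are close to running out of space.
 */
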
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

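/*
 * A successful claim acts as a reservation: the clusters sit in
 * s_dirtyclusters_counter until the delayed allocation that claimed
 * them is resolved or released.  Sketch of a caller:
 *
 *	if (ext4_claim_free_clusters(sbi, 1, 0))
 *		return -ENOSPC;
 */
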
/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

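/*
 * The intended caller pattern is a retry loop around the allocating
 * operation, along these lines (do_allocation() is a hypothetical
 * stand-in for the caller's own work):
 *
 *	int retries = 0;
 *
 *	do {
 *		err = do_allocation(...);
 *	} while (err == -ENOSPC &&
 *		 ext4_should_retry_alloc(sb, &retries));
 */
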
/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block (filesystem wide)
 * @count:		pointer to total number of clusters needed
 * @errp:		error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and errors are returned in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

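/*
 * Sketch of a typical call, allocating a single metadata block near
 * "goal" (passing count == NULL asks for exactly one block):
 *
 *	int err;
 *	ext4_fsblk_t newblock;
 *
 *	newblock = ext4_new_meta_blocks(handle, inode, goal, 0, NULL, &err);
 *	if (err)
 *		return err;
 */
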
/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_BLOCKS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

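/*
 * With sparse_super, the groups that carry a (primary or backup)
 * superblock are therefore 0, 1 and the powers of 3, 5 and 7:
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, 243, 343, ...
 */
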
/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					  ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					    ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}

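/*
 * Continuing the illustrative group 0 layout from above (superblock,
 * one descriptor block, 256 reserved GDT blocks, non-bigalloc):
 * num == 1 + 1 + 256 == 258 base metadata clusters.
 */
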
/*
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
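
/*
 * Example of the colouring, assuming 32768 blocks per group: a task
 * with pid 1234 (1234 % 16 == 2) gets the goal bg_start + 2 * 2048 ==
 * bg_start + 4096, spreading concurrent (non-delalloc) allocators
 * across sixteenths of the group.
 */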