/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>
/*
 * ialloc.c contains the inode allocation and deallocation routines.
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains one bitmap block for blocks, one bitmap
 * block for inodes, N blocks for the inode table, and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.
 */
/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
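
/*
 * Worked example (illustrative, not part of the original source): with
 * start_bit = 100 and end_bit = 32768, the loop above sets bits 100..103
 * one at a time, stopping at the next byte boundary ((100 + 7) & ~7 = 104),
 * and the memset then fills bytes 13..4095 of the bitmap in a single call
 * ((32768 - 104) >> 3 = 4083 bytes of 0xff).
 */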
/* Initializes an uninitialized inode bitmap */
static unsigned ext4_init_inode_bitmap(struct super_block *sb,
				       struct buffer_head *bh,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If the checksum is bad, mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}
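
/*
 * Worked example (illustrative): on a filesystem with 8192 inodes per group
 * and a 4096-byte block, only the first 8192/8 = 1024 bytes of the bitmap
 * block carry real state; the ext4_mark_bitmap_end() call above marks the
 * remaining bits 8192..32767 as in use so the allocator can never hand
 * them out.
 */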
/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}

	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * if the group is not uninit, then a bh that is uptodate
		 * means the bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for read. We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * gets set with the buffer lock held.
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}
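
/*
 * Typical caller pattern (a hypothetical sketch, not code from this file;
 * the error value chosen here is an assumption):
 *
 *	struct buffer_head *bh = ext4_read_inode_bitmap(sb, group);
 *	if (!bh)
 *		return -EIO;	// bitmap unreadable or group descriptor bad
 *	// ...inspect or modify bh->b_data, under the group lock if writing...
 *	brelse(bh);		// drop the reference when done
 */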
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;

	if (atomic_read(&inode->i_count) > 1) {
		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk(KERN_ERR "ext4_free_inode: inode on "
		       "nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	ext4_xattr_delete_inode(handle, inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t f = ext4_flex_group(sbi, block_group);

		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
		if (is_directory)
			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
		ext4_mark_super_dirty(sb);
	} else
		ext4_error(sb, "bit already cleared for inode %lu", ino);

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}
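
/*
 * Worked example (illustrative): freeing inode 12345 on a filesystem with
 * 8192 inodes per group gives block_group = (12345 - 1) / 8192 = 1 and
 * bit = (12345 - 1) % 8192 = 4152, i.e. bit 4152 of group 1's inode bitmap
 * is cleared and that group's free-inode count is incremented by one.
 */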
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
			  ext4_group_t *best_group)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group;
	int ret = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !ext4_free_inodes_count(sb, desc))
			continue;
		if (ext4_free_inodes_count(sb, desc) < avefreei)
			continue;
		if (!best_desc ||
		    (ext4_free_group_clusters(sb, desc) >
		     ext4_free_group_clusters(sb, best_desc))) {
			*best_group = group;
			best_desc = desc;
			ret = 0;
		}
	}
	return ret;
}
#define free_block_ratio 10

static int find_group_flex(struct super_block *sb, struct inode *parent,
			   ext4_group_t *best_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = sbi->s_flex_groups;
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	int flex_size = ext4_flex_bg_size(sbi);
	ext4_group_t best_flex = parent_fbg_group;
	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
	int flexbg_free_clusters;
	int flex_freeb_ratio;
	ext4_group_t n_fbg_groups;
	ext4_group_t i;

	n_fbg_groups = (ngroups + flex_size - 1) >>
		sbi->s_log_groups_per_flex;

find_close_to_parent:
	flexbg_free_clusters = atomic_read(&flex_group[best_flex].free_clusters);
	flex_freeb_ratio = EXT4_C2B(sbi, flexbg_free_clusters) * 100 /
		blocks_per_flex;
	if (atomic_read(&flex_group[best_flex].free_inodes) &&
	    flex_freeb_ratio > free_block_ratio)
		goto found_flexbg;

	if (best_flex && best_flex == parent_fbg_group) {
		best_flex--;
		goto find_close_to_parent;
	}

	for (i = 0; i < n_fbg_groups; i++) {
		if (i == parent_fbg_group || i == parent_fbg_group - 1)
			continue;

		flexbg_free_clusters = atomic_read(&flex_group[i].free_clusters);
		flex_freeb_ratio = EXT4_C2B(sbi, flexbg_free_clusters) * 100 /
			blocks_per_flex;

		if (flex_freeb_ratio > free_block_ratio &&
		    (atomic_read(&flex_group[i].free_inodes))) {
			best_flex = i;
			goto found_flexbg;
		}

		if ((atomic_read(&flex_group[best_flex].free_inodes) == 0) ||
		    ((atomic_read(&flex_group[i].free_clusters) >
		      atomic_read(&flex_group[best_flex].free_clusters)) &&
		     atomic_read(&flex_group[i].free_inodes)))
			best_flex = i;
	}

	if (!atomic_read(&flex_group[best_flex].free_inodes) ||
	    !atomic_read(&flex_group[best_flex].free_clusters))
		return -1;

found_flexbg:
	for (i = best_flex * flex_size; i < ngroups &&
		     i < (best_flex + 1) * flex_size; i++) {
		desc = ext4_get_group_desc(sb, i, NULL);
		if (ext4_free_inodes_count(sb, desc)) {
			*best_group = i;
			goto out;
		}
	}

	return -1;
out:
	return 0;
}
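
/*
 * Worked example (illustrative): with 32768 blocks per group and
 * flex_size = 16, blocks_per_flex = 524288, so a flex group passes the
 * free_block_ratio test above only while more than 10% of it (roughly
 * 52428 blocks) is still free and it has at least one free inode.
 */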
struct orlov_stats {
	__u32 free_inodes;
	__u32 free_clusters;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}
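
/*
 * Usage note (illustrative): with s_log_groups_per_flex = 4 the flex size
 * is 16, so callers pass a flex_bg number g and the stats come from the
 * in-memory sbi->s_flex_groups[g] counters; with flex_size = 1, g is a
 * plain block group number and the stats are read from its descriptor.
 */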
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the other inodes the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_clusters).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */
static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, int mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	avefreec = freeb;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_inodes_count(sb, desc) >= avefreei) {
			*group = grp;
			return 0;
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}
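
/*
 * Worked example (illustrative): with inodes_per_group = 8192,
 * flex_size = 16, ndirs = 1000 and ngroups = 64, the thresholds above are
 * max_dirs = 1000/64 + 8192/16 = 527 and min_inodes = avefreei - 32768,
 * which clamps to the floor of 1 for any avefreei below 32769; a flex
 * group is rejected only when it is demonstrably crowded.
 */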
static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, int mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that we use that
	 * flex group for future allocations.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}
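
/*
 * Worked example (illustrative): with ngroups = 128, parent_group = 5 and
 * parent->i_ino = 100, the quadratic probe starts at (5 + 100) % 128 = 105
 * and then visits groups 106, 108, 112, 120, 8, 40 and 104 (offsets 1, 2,
 * 4, ... taken mod 128) before falling back to the linear scan.
 */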
/*
 * claim the inode from the inode bitmap. If the group
 * is uninit we need to take the group's ext4_group_lock
 * and clear the uninit flag. The inode bitmap update
 * and group desc uninit flag clear should be done
 * while holding ext4_group_lock so that ext4_read_inode_bitmap
 * doesn't race with ext4_claim_inode
 */
static int ext4_claim_inode(struct super_block *sb,
			struct buffer_head *inode_bitmap_bh,
			unsigned long ino, ext4_group_t group, int mode)
{
	int free = 0, retval = 0, count;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

	/*
	 * We have to be sure that new inode allocation does not race with
	 * inode table initialization, because otherwise we may end up
	 * allocating and writing a new inode right before sb_issue_zeroout
	 * takes place and overwriting our new inode with zeroes. So we
	 * take alloc_sem to prevent it.
	 */
	down_read(&grp->alloc_sem);
	ext4_lock_group(sb, group);
	if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
		/* not a free inode */
		retval = 1;
		goto err_ret;
	}
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
			ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_unlock_group(sb, group);
		up_read(&grp->alloc_sem);
		ext4_error(sb, "reserved inode or inode > inodes count - "
			   "block_group = %u, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		return 1;
	}
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {

		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */
			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				ext4_itable_unused_count(sb, gdp);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
	}
	count = ext4_free_inodes_count(sb, gdp) - 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (S_ISDIR(mode)) {
		count = ext4_used_dirs_count(sb, gdp) + 1;
		ext4_used_dirs_set(sb, gdp, count);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
err_ret:
	ext4_unlock_group(sb, group);
	up_read(&grp->alloc_sem);
	return retval;
}
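
/*
 * Worked example (illustrative): if EXT4_INODES_PER_GROUP(sb) = 8192 and
 * the newly claimed inode's relative number (after the ino++ above) is 100
 * while only free = 50 inodes of the table were initialized so far, then
 * ino > free and bg_itable_unused is rewritten to 8192 - 100 = 8092,
 * extending the initialized region of the inode table up to this inode.
 */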
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
			     const struct qstr *qstr, __u32 goal)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	static int once = 1;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);
	sbi = EXT4_SB(sb);

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (sbi->s_log_groups_per_flex && test_opt(sb, OLDALLOC)) {
		ret2 = find_group_flex(sb, dir, &group);
		if (ret2 == -1) {
			ret2 = find_group_other(sb, dir, &group, mode);
			if (ret2 == 0 && once) {
				once = 0;
				printk(KERN_NOTICE "ext4: find_group_flex "
				       "failed, fallback succeeded dir %lu\n",
				       dir->i_ino);
			}
		}
		goto got_group;
	}

	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			ret2 = find_group_dir(sb, dir, &group);
		else
			ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	} else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);

		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
							    inode_bitmap_bh);
			if (err)
				goto fail;

			BUFFER_TRACE(group_desc_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
							    group_desc_bh);
			if (err)
				goto fail;
			if (!ext4_claim_inode(sb, inode_bitmap_bh,
						ino, group, mode)) {
				/* we won it */
				BUFFER_TRACE(inode_bitmap_bh,
					"call ext4_handle_dirty_metadata");
				err = ext4_handle_dirty_metadata(handle,
								 NULL,
							inode_bitmap_bh);
				if (err)
					goto fail;
				/* zero bit is inode number 1 */
				ino++;
				goto got;
			}
			/* we lost it */
			ext4_handle_release_buffer(handle, inode_bitmap_bh);
			ext4_handle_release_buffer(handle, group_desc_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in a concurrent environment.  It is
		 * very rare.  We cannot repeat the find_group_xxx() call
		 * because that will simply return the same blockgroup, because
		 * the group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == ngroups)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
		brelse(block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_blocks_after_init(sb, group, gdp));
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		ext4_unlock_group(sb, group);

		if (err)
			goto fail;
	}
	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	ext4_mark_super_dirty(sb);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}

	if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/*
	 * Don't inherit extent flag from directory, amongst others. We set
	 * extent flag on newly created directory and file only if -o extent
	 * mount option is specified
	 */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		err = -EINVAL;
		goto fail_drop;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	dquot_initialize(inode);
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir, qstr);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}
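
/*
 * Caller sketch (hypothetical, modeled on how the namei code typically uses
 * this function; the surrounding setup and error paths are abbreviated):
 *
 *	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	// ...install i_op/i_fop, add the directory entry, unlock the inode...
 */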
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}
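
/*
 * Usage note (illustrative): orphan recovery at mount time walks the
 * on-disk orphan list and calls ext4_orphan_get() for each entry; a
 * non-error return here is the guarantee that truncating or deleting the
 * inode during recovery operates on a live, allocated inode.
 */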
unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}
/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}
/*
 * Zeroes the not-yet-zeroed inode table - just writes zeroes through the
 * whole inode table. Must be called without any spinlock held. The only
 * place where it is called from on an active filesystem is the ext4lazyinit
 * thread, so we do not need any special locks; however we have to prevent
 * inode allocation from the current group, so we take alloc_sem to
 * block ext4_claim_inode until we are finished.
 */
extern int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
				 int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;

	/* This should not happen, but just to be sure check this */
	if (sb->s_flags & MS_RDONLY) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If the inode bitmap was already initialized there may be some
	 * used inodes, so we need to skip blocks with used inodes in
	 * the inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);

	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
		ext4_error(sb, "Something is wrong with group %u\n"
			   "Used itable blocks: %d "
			   "itable unused count: %u\n",
			   group, used_blks,
			   ext4_itable_unused_count(sb, gdp));
		ret = 1;
		goto err_out;
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full. But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * further zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}
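
/*
 * Worked example (illustrative): with 8192 inodes per group, 256-byte
 * inodes and 4096-byte blocks, s_inodes_per_block = 16 and
 * s_itb_per_group = 512. If 100 inodes of the group are in use,
 * used_blks = DIV_ROUND_UP(100, 16) = 7, so the zeroout above covers the
 * remaining num = 512 - 7 = 505 inode table blocks starting at blk.
 */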