/*
 *  linux/fs/ext4/ialloc.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4_jbd2.h"
/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */
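/*
 * Illustrative layout (typical mkfs defaults, not taken from this file):
 * with 4 KiB blocks, 32,768 blocks per group, 8,192 inodes per group and
 * 256-byte inodes, each group needs one block for the block bitmap, one
 * block for the inode bitmap and 8192 * 256 / 4096 = 512 blocks for the
 * inode table; the remaining blocks of the group hold data.
 */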
/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
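/*
 * Illustrative example (not from the original source): with start_bit = 100
 * and end_bit = 32768, mark_bitmap_end() sets bits 100..103 one at a time
 * with the endian-safe ext4_set_bit(), and the memset() then fills bytes
 * 13..4095 (bits 104..32767) in a single call.
 */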
/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
				ext4_group_t block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __func__, "Checksum bad for group %lu\n",
			   block_group);
		gdp->bg_free_blocks_count = 0;
		gdp->bg_free_inodes_count = 0;
		gdp->bg_itable_unused = 0;
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}
/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		bh = sb_getblk(sb, ext4_inode_bitmap(sb, desc));
		if (!buffer_uptodate(bh)) {
			lock_buffer(bh);
			if (!buffer_uptodate(bh)) {
				ext4_init_inode_bitmap(sb, bh, block_group,
						       desc);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
		}
	} else {
		bh = sb_bread(sb, ext4_inode_bitmap(sb, desc));
	}
	if (!bh)
		ext4_error(sb, "read_inode_bitmap",
			   "Cannot read inode bitmap - "
			   "block_group = %lu, inode_bitmap = %llu",
			   block_group, ext4_inode_bitmap(sb, desc));
	return bh;
}
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err;
	if (atomic_read(&inode->i_count) > 1) {
		printk("ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk("ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk("ext4_free_inode: inode on nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	ext4_xattr_delete_inode(handle, inode);
	DQUOT_FREE_INODE(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "ext4_free_inode",
			   "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
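	/*
	 * Illustrative example (assumed default of 8,192 inodes per group,
	 * not taken from this file): freeing inode 10,000 gives
	 * block_group = (10000 - 1) / 8192 = 1 and bit = (10000 - 1) % 8192
	 * = 1807, i.e. bit 1807 of group 1's inode bitmap is cleared.
	 */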
	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;
	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
				   bit, bitmap_bh->b_data))
		ext4_error(sb, "ext4_free_inode",
			   "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc(sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal)
			goto error_return;
		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
			if (is_directory)
				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);
		}
		BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh2);
		if (!fatal)
			fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
				ext4_group_t *best_group)
{
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group;
	int ret = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
			continue;
		if (!best_desc ||
		    (le16_to_cpu(desc->bg_free_blocks_count) >
		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
			*best_group = group;
			best_desc = desc;
			ret = 0;
		}
	}
	return ret;
}
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For other directories, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it is already carrying too large a debt (max_debt).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */
#define INODE_COST 64
#define BLOCK_COST 256
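/*
 * Illustrative figures (assumed, not taken from this file): on a filesystem
 * with 64 groups, 1,280 directories, 8,192 inodes per group and 32,768
 * blocks per group, averaging 6,000 free inodes and 20,000 free blocks per
 * group, the thresholds computed in find_group_orlov() below work out to
 *   max_dirs   = 1280 / 64 + 8192 / 16 = 532
 *   min_inodes = 6000 - 8192 / 4       = 3952
 *   min_blocks = 20000 - 32768 / 4     = 11808
 */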
static int find_group_orlov(struct super_block *sb, struct inode *parent,
				ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	ext4_fsblk_t blocks_per_dir;
	unsigned int ndirs;
	int max_debt, max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	ext4_group_t i;
	struct ext4_group_desc *desc;

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
	if ((parent == sb->s_root->d_inode) ||
	    (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
		int best_ndir = inodes_per_group;
		ext4_group_t grp;
		int ret = -1;

		get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			grp = (parent_group + i) % ngroups;
			desc = ext4_get_group_desc(sb, grp, NULL);
			if (!desc || !desc->bg_free_inodes_count)
				continue;
			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
				continue;
			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
				continue;
			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
				continue;
			*group = grp;
			ret = 0;
			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
		}
		if (ret == 0)
			return ret;
		goto fallback;
	}
	blocks_per_dir = ext4_blocks_count(es) - freeb;
	do_div(blocks_per_dir, ndirs);

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group / 4;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

	max_debt = EXT4_BLOCKS_PER_GROUP(sb);
	max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt < 1)
		max_debt = 1;
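	/*
	 * Illustrative arithmetic (assumed figures, not from this file): with
	 * 32,768 blocks per group and an average of 300 blocks per directory,
	 * max_debt = 32768 / max(300, BLOCK_COST) = 109; since
	 * 109 * INODE_COST = 6976 does not exceed 8,192 inodes per group, no
	 * clamping to inodes_per_group / INODE_COST = 128 is needed.
	 */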
	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
			continue;
		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
			continue;
		return 0;
	}

fallback:
	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && desc->bg_free_inodes_count &&
		    le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
			return 0;
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}
static int find_group_other(struct super_block *sb, struct inode *parent,
				ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *desc;
	ext4_group_t i;

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
	    le16_to_cpu(desc->bg_free_blocks_count))
		return 0;
	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;
	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
		    le16_to_cpu(desc->bg_free_blocks_count))
			return 0;
	}
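	/*
	 * Illustrative probe sequence (not from the original source): starting
	 * from group g, the quadratic hash above visits g+1, g+3, g+7, g+15,
	 * ... (adding 1, 2, 4, 8, ... and wrapping modulo ngroups), so it
	 * makes only O(log ngroups) probes before falling back to the linear
	 * scan below.
	 */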
	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
			return 0;
	}

	return -1;
}
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_super_block *es;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	int free = 0;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			ret2 = find_group_dir(sb, dir, &group);
		else
			ret2 = find_group_orlov(sb, dir, &group);
	} else
		ret2 = find_group_other(sb, dir, &group);

	err = -ENOSPC;
	if (ret2 == -1)
		goto out;
	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
						ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext4_journal_dirty_metadata");
				err = ext4_journal_dirty_metadata(handle,
								bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			jbd2_journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
	    ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_error(sb, __func__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %lu, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh2);
	if (err)
		goto fail;
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bh = read_block_bitmap(sb, group);

		BUFFER_TRACE(block_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bh);
		if (err) {
			brelse(block_bh);
			goto fail;
		}

		free = 0;
		spin_lock(sb_bgl_lock(sbi, group));
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_free_blocks_count = cpu_to_le16(free);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bh, "dirty block bitmap");
			err = ext4_journal_dirty_metadata(handle, block_bh);
		}

		brelse(block_bh);
		if (err)
			goto fail;
	}
	spin_lock(sb_bgl_lock(sbi, group));
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);

			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */
			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				le16_to_cpu(gdp->bg_itable_unused);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			gdp->bg_itable_unused =
				cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
	}

	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
	if (S_ISDIR(mode))
		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bh2);
	if (err)
		goto fail;
	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);

	inode->i_uid = current->fsuid;
	if (test_opt(sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
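	/*
	 * Illustrative example (assumed default of 8,192 inodes per group,
	 * not taken from this file): relative inode 1808 (bit 1807) in
	 * group 1 yields i_ino = 1808 + 1 * 8192 = 10000, the inverse of the
	 * (ino - 1) / EXT4_INODES_PER_GROUP(sb) mapping used when freeing.
	 */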
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;

	/*
	 * Don't inherit extent flag from directory.  We set extent flag on
	 * newly created directory and file only if -o extent mount option is
	 * specified.
	 */
	ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT4_DIRSYNC_FL;
	ei->i_file_acl = 0;
	ei->i_block_alloc_info = NULL;
	ei->i_block_group = group;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
	ret = inode;
	if (DQUOT_ALLOC_INODE(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	if (test_opt(sb, EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
			ext4_ext_tree_init(handle, inode);
			err = ext4_update_incompat_feature(handle, sb,
					EXT4_FEATURE_INCOMPAT_EXTENTS);
			if (err)
				goto fail_free_drop;
		}
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	DQUOT_FREE_INODE(inode);

fail_drop:
	DQUOT_DROP(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __func__,
			     "bad orphan ino %lu! e2fsck was run?", ino);
		goto error;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __func__,
			     "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;
iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, __func__,
		     "bad orphan inode %lu! e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}
unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
		       i, le16_to_cpu(gdp->bg_free_inodes_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext4_count_free_inodes: stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
	}
	return desc_count;
#endif
}
/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
{
	unsigned long count = 0;
	ext4_group_t i;

	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += le16_to_cpu(gdp->bg_used_dirs_count);
	}
	return count;
}