/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "xattr.h"
#include "acl.h"
#include "group.h"
/*
 * ialloc.c contains the inodes allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */
/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
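/*
 * Worked example: with start_bit = 100 and end_bit = 200, the loop below
 * sets bits 100..103 individually (the next byte boundary is
 * (100 + 7) & ~7 = 104) and memset() then covers bits 104..199 as twelve
 * whole 0xff bytes.
 */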
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb,
				struct buffer_head *bh, int block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes used to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __FUNCTION__, "Checksum bad for group %u\n",
			   block_group);
		gdp->bg_free_blocks_count = 0;
		gdp->bg_free_inodes_count = 0;
		gdp->bg_itable_unused = 0;
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}
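	/*
	 * Only the first EXT4_INODES_PER_GROUP() bits are real inodes; the
	 * tail of the bitmap is marked in-use below so the allocator can
	 * never hand out an inode number beyond the group's inode table.
	 */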
	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}
/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
read_inode_bitmap(struct super_block *sb, unsigned long block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		goto error_out;
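	/*
	 * A group flagged EXT4_BG_INODE_UNINIT has no bitmap on disk yet,
	 * so the bitmap is constructed in memory instead of being read.
	 * buffer_uptodate() is tested again under lock_buffer() so that two
	 * CPUs racing here cannot both initialize the same buffer.
	 */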
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		bh = sb_getblk(sb, ext4_inode_bitmap(sb, desc));
		if (!buffer_uptodate(bh)) {
			lock_buffer(bh);
			if (!buffer_uptodate(bh)) {
				ext4_init_inode_bitmap(sb, bh, block_group,
						       desc);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
		}
	} else {
		bh = sb_bread(sb, ext4_inode_bitmap(sb, desc));
	}
	if (!bh)
		ext4_error(sb, "read_inode_bitmap",
			   "Cannot read inode bitmap - "
			   "block_group = %lu, inode_bitmap = %llu",
			   block_group, ext4_inode_bitmap(sb, desc));
error_out:
	return bh;
}
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	unsigned long block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err;

	if (atomic_read(&inode->i_count) > 1) {
		printk("ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk("ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk("ext4_free_inode: inode on nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_INIT(inode);
	ext4_xattr_delete_inode(handle, inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "ext4_free_inode",
			   "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
				   bit, bitmap_bh->b_data))
		ext4_error(sb, "ext4_free_inode",
			   "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc(sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal)
			goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			gdp->bg_free_inodes_count = cpu_to_le16(
				le16_to_cpu(gdp->bg_free_inodes_count) + 1);
			if (is_directory)
				gdp->bg_used_dirs_count = cpu_to_le16(
					le16_to_cpu(gdp->bg_used_dirs_count) - 1);
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);
		}
		BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh2);
		if (!fatal)
			fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent)
{
	int ngroups = EXT4_SB(sb)->s_groups_count;
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	int group, best_group = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
			continue;
		if (!best_desc ||
		    (le16_to_cpu(desc->bg_free_blocks_count) >
		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
			best_group = group;
			best_desc = desc;
		}
	}
	return best_group;
}
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining directories, the rules are:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it's already running too large a debt (max_debt).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest.  If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

#define INODE_COST 64
#define BLOCK_COST 256
static int find_group_orlov(struct super_block *sb, struct inode *parent)
{
	int parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	ext4_fsblk_t blocks_per_dir;
	unsigned int ndirs;
	int max_debt, max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	int group = -1, i;
	struct ext4_group_desc *desc;

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
	if ((parent == sb->s_root->d_inode) ||
	    (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
		int best_ndir = inodes_per_group;
		int best_group = -1;

		get_random_bytes(&group, sizeof(group));
		parent_group = (unsigned)group % ngroups;
		for (i = 0; i < ngroups; i++) {
			group = (parent_group + i) % ngroups;
			desc = ext4_get_group_desc(sb, group, NULL);
			if (!desc || !desc->bg_free_inodes_count)
				continue;
			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
				continue;
			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
				continue;
			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
				continue;
			best_group = group;
			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
		}
		if (best_group >= 0)
			return best_group;
		goto fallback;
	}
	blocks_per_dir = ext4_blocks_count(es) - freeb;
	do_div(blocks_per_dir, ndirs);

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group / 4;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

	max_debt = EXT4_BLOCKS_PER_GROUP(sb);
	max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt == 0)
		max_debt = 1;
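	/*
	 * Worked example: with 4096 blocks and 2048 inodes per group and an
	 * average of one directory per 512 allocated blocks, max_debt is
	 * 4096 / max(512, BLOCK_COST) = 8, below both the
	 * inodes_per_group / INODE_COST = 32 cap and the 255 ceiling.
	 */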
	for (i = 0; i < ngroups; i++) {
		group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
			continue;
		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
			continue;
		return group;
	}

fallback:
	for (i = 0; i < ngroups; i++) {
		group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
			return group;
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}
static int find_group_other(struct super_block *sb, struct inode *parent)
{
	int parent_group = EXT4_I(parent)->i_block_group;
	int ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *desc;
	int group, i;

	/*
	 * Try to place the inode in its parent directory
	 */
	group = parent_group;
	desc = ext4_get_group_desc(sb, group, NULL);
	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
	    le16_to_cpu(desc->bg_free_blocks_count))
		return group;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	group = (group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
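	/*
	 * With i doubling each pass, the probe sequence from a starting
	 * group g visits g+1, g+3, g+7, g+15, ... (mod ngroups), so at most
	 * about log2(ngroups) groups are examined before falling back to
	 * the linear scan below.
	 */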
	for (i = 1; i < ngroups; i <<= 1) {
		group += i;
		if (group >= ngroups)
			group -= ngroups;
		desc = ext4_get_group_desc(sb, group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
		    le16_to_cpu(desc->bg_free_blocks_count))
			return group;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++group >= ngroups)
			group = 0;
		desc = ext4_get_group_desc(sb, group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
			return group;
	}

	return -1;
}
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	int group;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_super_block *es;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int err = 0;
	struct inode *ret;
	int i, free = 0;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			group = find_group_dir(sb, dir);
		else
			group = find_group_orlov(sb, dir);
	} else
		group = find_group_other(sb, dir);

	err = -ENOSPC;
	if (group == -1)
		goto out;
	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
						 ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext4_journal_dirty_metadata");
				err = ext4_journal_dirty_metadata(handle,
								  bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			jbd2_journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
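	/*
	 * The atomic test-and-set above succeeded, so this CPU now owns bit
	 * "ino" (zero-based within the group) in the journaled inode bitmap;
	 * convert it to a one-based relative inode number.
	 */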
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
	    ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_error(sb, __FUNCTION__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %d, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		err = -EIO;
		goto fail;
	}
	BUFFER_TRACE(bh2, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh2);
	if (err)
		goto fail;

	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bh = read_block_bitmap(sb, group);

		BUFFER_TRACE(block_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bh);
		if (err) {
			brelse(block_bh);
			goto fail;
		}

		free = 0;
		spin_lock(sb_bgl_lock(sbi, group));
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_free_blocks_count = cpu_to_le16(free);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bh, "dirty block bitmap");
			err = ext4_journal_dirty_metadata(handle, block_bh);
		}

		brelse(block_bh);
		if (err)
			goto fail;
	}
	spin_lock(sb_bgl_lock(sbi, group));
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);

			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */
			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				le16_to_cpu(gdp->bg_itable_unused);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
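		/*
		 * Example: with 16384 inodes per group and ino == 100 just
		 * past the last used inode (ino > free), bg_itable_unused
		 * becomes 16384 - 100, i.e. the tail of the inode table that
		 * has never been written and that e2fsck may skip.
		 */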
		if (ino > free)
			gdp->bg_itable_unused =
				cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
	}

	gdp->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
	if (S_ISDIR(mode)) {
		gdp->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bh2);
	if (err)
		goto fail;
	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	inode->i_uid = current->fsuid;
	if (test_opt(sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;
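	/*
	 * Inherit the parent directory's flags, but not EXT4_INDEX_FL: the
	 * htree-index flag describes the parent's own hashed layout, and a
	 * freshly created inode has no index blocks yet.
	 */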
	ei->i_flags = EXT4_I(dir)->i_flags & ~EXT4_INDEX_FL;
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT4_DIRSYNC_FL;
	ei->i_file_acl = 0;
	ei->i_dir_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_alloc_info = NULL;
	ei->i_block_group = group;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
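	/*
	 * i_generation disambiguates reuse of an inode number: NFS file
	 * handles embed it, so a stale handle referring to a previous
	 * occupant of this number is detected instead of silently resolving
	 * to the new file.
	 */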
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
	ret = inode;
	if (DQUOT_ALLOC_INODE(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}
	if (test_opt(sb, EXTENTS)) {
		EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
		ext4_ext_tree_init(handle, inode);
		if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
			err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
			if (err)
				goto fail;
			EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS);
			BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "call ext4_journal_dirty_metadata");
			err = ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
		}
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	DQUOT_FREE_INODE(inode);

fail_drop:
	DQUOT_DROP(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	unsigned long block_group;
	int bit;
	struct buffer_head *bitmap_bh = NULL;
	struct inode *inode = NULL;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __FUNCTION__,
			     "bad orphan ino %lu!  e2fsck was run?", ino);
		goto out;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __FUNCTION__,
			     "inode bitmap error for orphan %lu", ino);
		goto out;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data) ||
	    !(inode = iget(sb, ino)) || is_bad_inode(inode) ||
	    NEXT_ORPHAN(inode) > max_ino) {
		ext4_warning(sb, __FUNCTION__,
			     "bad orphan inode %lu!  e2fsck was run?", ino);
		printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
		       bit, (unsigned long long)bitmap_bh->b_blocknr,
		       ext4_test_bit(bit, bitmap_bh->b_data));
		printk(KERN_NOTICE "inode=%p\n", inode);
		if (inode) {
			printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
			       is_bad_inode(inode));
			printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
			       NEXT_ORPHAN(inode));
			printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		}
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode && inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
		inode = NULL;
	}
out:
	brelse(bitmap_bh);
	return inode;
}
unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	int i;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk("group %d: stored = %d, counted = %lu\n",
		       i, le16_to_cpu(gdp->bg_free_inodes_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext4_count_free_inodes: stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		cond_resched();
	}
	return desc_count;
#endif
}
/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
{
	unsigned long count = 0;
	int i;

	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += le16_to_cpu(gdp->bg_used_dirs_count);
	}
	return count;
}