/*
 *  linux/fs/ext2/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext2_read_super).
 */
30 #include <linux/ext2_fs.h>
31 #include <linux/stat.h>
32 #include <linux/sched.h>
33 #include <linux/string.h>
34 #include <linux/locks.h>
35 #include <linux/quotaops.h>
37 #include <asm/bitops.h>
38 #include <asm/byteorder.h>
/*
 * in_range(b, first, len): non-zero iff block b lies inside the run of
 * `len' blocks starting at `first'.  NOTE: `b' is evaluated twice and
 * `first'/`len' once each per expansion — callers must pass expressions
 * without side effects.
 */
#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
42 struct ext2_group_desc
* ext2_get_group_desc(struct super_block
* sb
,
43 unsigned int block_group
,
44 struct buffer_head
** bh
)
46 unsigned long group_desc
;
48 struct ext2_group_desc
* gdp
;
50 if (block_group
>= sb
->u
.ext2_sb
.s_groups_count
) {
51 ext2_error (sb
, "ext2_get_group_desc",
52 "block_group >= groups_count - "
53 "block_group = %d, groups_count = %lu",
54 block_group
, sb
->u
.ext2_sb
.s_groups_count
);
59 group_desc
= block_group
/ EXT2_DESC_PER_BLOCK(sb
);
60 desc
= block_group
% EXT2_DESC_PER_BLOCK(sb
);
61 if (!sb
->u
.ext2_sb
.s_group_desc
[group_desc
]) {
62 ext2_error (sb
, "ext2_get_group_desc",
63 "Group descriptor not loaded - "
64 "block_group = %d, group_desc = %lu, desc = %lu",
65 block_group
, group_desc
, desc
);
69 gdp
= (struct ext2_group_desc
*)
70 sb
->u
.ext2_sb
.s_group_desc
[group_desc
]->b_data
;
72 *bh
= sb
->u
.ext2_sb
.s_group_desc
[group_desc
];
77 * Read the bitmap for a given block_group, reading into the specified
78 * slot in the superblock's bitmap cache.
80 * Return >=0 on success or a -ve error code.
83 static int read_block_bitmap (struct super_block
* sb
,
84 unsigned int block_group
,
85 unsigned long bitmap_nr
)
87 struct ext2_group_desc
* gdp
;
88 struct buffer_head
* bh
= NULL
;
91 gdp
= ext2_get_group_desc (sb
, block_group
, NULL
);
94 bh
= bread (sb
->s_dev
, le32_to_cpu(gdp
->bg_block_bitmap
), sb
->s_blocksize
);
96 ext2_error (sb
, "read_block_bitmap",
97 "Cannot read block bitmap - "
98 "block_group = %d, block_bitmap = %lu",
99 block_group
, (unsigned long) gdp
->bg_block_bitmap
);
103 * On IO error, just leave a zero in the superblock's block pointer for
104 * this group. The IO will be retried next time.
107 sb
->u
.ext2_sb
.s_block_bitmap_number
[bitmap_nr
] = block_group
;
108 sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
] = bh
;
113 * load_block_bitmap loads the block bitmap for a blocks group
115 * It maintains a cache for the last bitmaps loaded. This cache is managed
116 * with a LRU algorithm.
119 * 1/ There is one cache per mounted file system.
120 * 2/ If the file system contains less than EXT2_MAX_GROUP_LOADED groups,
121 * this function reads the bitmap without maintaining a LRU cache.
123 * Return the slot used to store the bitmap, or a -ve error code.
125 static int load__block_bitmap (struct super_block
* sb
,
126 unsigned int block_group
)
128 int i
, j
, retval
= 0;
129 unsigned long block_bitmap_number
;
130 struct buffer_head
* block_bitmap
;
132 if (block_group
>= sb
->u
.ext2_sb
.s_groups_count
)
133 ext2_panic (sb
, "load_block_bitmap",
134 "block_group >= groups_count - "
135 "block_group = %d, groups_count = %lu",
136 block_group
, sb
->u
.ext2_sb
.s_groups_count
);
138 if (sb
->u
.ext2_sb
.s_groups_count
<= EXT2_MAX_GROUP_LOADED
) {
139 if (sb
->u
.ext2_sb
.s_block_bitmap
[block_group
]) {
140 if (sb
->u
.ext2_sb
.s_block_bitmap_number
[block_group
] ==
143 ext2_error (sb
, "load_block_bitmap",
144 "block_group != block_bitmap_number");
146 retval
= read_block_bitmap (sb
, block_group
, block_group
);
152 for (i
= 0; i
< sb
->u
.ext2_sb
.s_loaded_block_bitmaps
&&
153 sb
->u
.ext2_sb
.s_block_bitmap_number
[i
] != block_group
; i
++)
155 if (i
< sb
->u
.ext2_sb
.s_loaded_block_bitmaps
&&
156 sb
->u
.ext2_sb
.s_block_bitmap_number
[i
] == block_group
) {
157 block_bitmap_number
= sb
->u
.ext2_sb
.s_block_bitmap_number
[i
];
158 block_bitmap
= sb
->u
.ext2_sb
.s_block_bitmap
[i
];
159 for (j
= i
; j
> 0; j
--) {
160 sb
->u
.ext2_sb
.s_block_bitmap_number
[j
] =
161 sb
->u
.ext2_sb
.s_block_bitmap_number
[j
- 1];
162 sb
->u
.ext2_sb
.s_block_bitmap
[j
] =
163 sb
->u
.ext2_sb
.s_block_bitmap
[j
- 1];
165 sb
->u
.ext2_sb
.s_block_bitmap_number
[0] = block_bitmap_number
;
166 sb
->u
.ext2_sb
.s_block_bitmap
[0] = block_bitmap
;
169 * There's still one special case here --- if block_bitmap == 0
170 * then our last attempt to read the bitmap failed and we have
171 * just ended up caching that failure. Try again to read it.
174 retval
= read_block_bitmap (sb
, block_group
, 0);
176 if (sb
->u
.ext2_sb
.s_loaded_block_bitmaps
< EXT2_MAX_GROUP_LOADED
)
177 sb
->u
.ext2_sb
.s_loaded_block_bitmaps
++;
179 brelse (sb
->u
.ext2_sb
.s_block_bitmap
[EXT2_MAX_GROUP_LOADED
- 1]);
180 for (j
= sb
->u
.ext2_sb
.s_loaded_block_bitmaps
- 1; j
> 0; j
--) {
181 sb
->u
.ext2_sb
.s_block_bitmap_number
[j
] =
182 sb
->u
.ext2_sb
.s_block_bitmap_number
[j
- 1];
183 sb
->u
.ext2_sb
.s_block_bitmap
[j
] =
184 sb
->u
.ext2_sb
.s_block_bitmap
[j
- 1];
186 retval
= read_block_bitmap (sb
, block_group
, 0);
192 * Load the block bitmap for a given block group. First of all do a couple
193 * of fast lookups for common cases and then pass the request onto the guts
194 * of the bitmap loader.
196 * Return the slot number of the group in the superblock bitmap cache's on
197 * success, or a -ve error code.
199 * There is still one inconsistancy here --- if the number of groups in this
200 * filesystems is <= EXT2_MAX_GROUP_LOADED, then we have no way of
201 * differentiating between a group for which we have never performed a bitmap
202 * IO request, and a group for which the last bitmap read request failed.
204 static inline int load_block_bitmap (struct super_block
* sb
,
205 unsigned int block_group
)
210 * Do the lookup for the slot. First of all, check if we're asking
211 * for the same slot as last time, and did we succeed that last time?
213 if (sb
->u
.ext2_sb
.s_loaded_block_bitmaps
> 0 &&
214 sb
->u
.ext2_sb
.s_block_bitmap_number
[0] == block_group
&&
215 sb
->u
.ext2_sb
.s_block_bitmap
[block_group
]) {
219 * Or can we do a fast lookup based on a loaded group on a filesystem
220 * small enough to be mapped directly into the superblock?
222 else if (sb
->u
.ext2_sb
.s_groups_count
<= EXT2_MAX_GROUP_LOADED
&&
223 sb
->u
.ext2_sb
.s_block_bitmap_number
[block_group
] == block_group
&&
224 sb
->u
.ext2_sb
.s_block_bitmap
[block_group
]) {
228 * If not, then do a full lookup for this block group.
231 slot
= load__block_bitmap (sb
, block_group
);
235 * <0 means we just got an error
241 * If it's a valid slot, we may still have cached a previous IO error,
242 * in which case the bh in the superblock cache will be zero.
244 if (!sb
->u
.ext2_sb
.s_block_bitmap
[slot
])
248 * Must have been read in OK to get this far.
253 void ext2_free_blocks (const struct inode
* inode
, unsigned long block
,
256 struct buffer_head
* bh
;
257 struct buffer_head
* bh2
;
258 unsigned long block_group
;
262 unsigned long overflow
;
263 struct super_block
* sb
;
264 struct ext2_group_desc
* gdp
;
265 struct ext2_super_block
* es
;
269 printk ("ext2_free_blocks: nonexistent device");
273 es
= sb
->u
.ext2_sb
.s_es
;
274 if (block
< le32_to_cpu(es
->s_first_data_block
) ||
275 (block
+ count
) > le32_to_cpu(es
->s_blocks_count
)) {
276 ext2_error (sb
, "ext2_free_blocks",
277 "Freeing blocks not in datazone - "
278 "block = %lu, count = %lu", block
, count
);
282 ext2_debug ("freeing block %lu\n", block
);
286 block_group
= (block
- le32_to_cpu(es
->s_first_data_block
)) /
287 EXT2_BLOCKS_PER_GROUP(sb
);
288 bit
= (block
- le32_to_cpu(es
->s_first_data_block
)) %
289 EXT2_BLOCKS_PER_GROUP(sb
);
291 * Check to see if we are freeing blocks across a group
294 if (bit
+ count
> EXT2_BLOCKS_PER_GROUP(sb
)) {
295 overflow
= bit
+ count
- EXT2_BLOCKS_PER_GROUP(sb
);
298 bitmap_nr
= load_block_bitmap (sb
, block_group
);
302 bh
= sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
];
303 gdp
= ext2_get_group_desc (sb
, block_group
, &bh2
);
307 if (test_opt (sb
, CHECK_STRICT
) &&
308 (in_range (le32_to_cpu(gdp
->bg_block_bitmap
), block
, count
) ||
309 in_range (le32_to_cpu(gdp
->bg_inode_bitmap
), block
, count
) ||
310 in_range (block
, le32_to_cpu(gdp
->bg_inode_table
),
311 sb
->u
.ext2_sb
.s_itb_per_group
) ||
312 in_range (block
+ count
- 1, le32_to_cpu(gdp
->bg_inode_table
),
313 sb
->u
.ext2_sb
.s_itb_per_group
)))
314 ext2_panic (sb
, "ext2_free_blocks",
315 "Freeing blocks in system zones - "
316 "Block = %lu, count = %lu",
319 for (i
= 0; i
< count
; i
++) {
320 if (!ext2_clear_bit (bit
+ i
, bh
->b_data
))
321 ext2_warning (sb
, "ext2_free_blocks",
322 "bit already cleared for block %lu",
325 DQUOT_FREE_BLOCK(sb
, inode
, 1);
326 gdp
->bg_free_blocks_count
=
327 cpu_to_le16(le16_to_cpu(gdp
->bg_free_blocks_count
)+1);
328 es
->s_free_blocks_count
=
329 cpu_to_le32(le32_to_cpu(es
->s_free_blocks_count
)+1);
333 mark_buffer_dirty(bh2
, 1);
334 mark_buffer_dirty(sb
->u
.ext2_sb
.s_sbh
, 1);
336 mark_buffer_dirty(bh
, 1);
337 if (sb
->s_flags
& MS_SYNCHRONOUS
) {
338 ll_rw_block (WRITE
, 1, &bh
);
353 * ext2_new_block uses a goal block to assist allocation. If the goal is
354 * free, or there is a free block within 32 blocks of the goal, that block
355 * is allocated. Otherwise a forward search is made for a free block; within
356 * each block group the search first looks for an entire free byte in the block
357 * bitmap, and then for any free bit if that fails.
359 int ext2_new_block (const struct inode
* inode
, unsigned long goal
,
360 u32
* prealloc_count
, u32
* prealloc_block
, int * err
)
362 struct buffer_head
* bh
;
363 struct buffer_head
* bh2
;
367 struct super_block
* sb
;
368 struct ext2_group_desc
* gdp
;
369 struct ext2_super_block
* es
;
373 static int goal_hits
= 0, goal_attempts
= 0;
377 printk ("ext2_new_block: nonexistent device");
382 es
= sb
->u
.ext2_sb
.s_es
;
383 if (le32_to_cpu(es
->s_free_blocks_count
) <= le32_to_cpu(es
->s_r_blocks_count
) &&
384 ((sb
->u
.ext2_sb
.s_resuid
!= current
->fsuid
) &&
385 (sb
->u
.ext2_sb
.s_resgid
== 0 ||
386 !in_group_p (sb
->u
.ext2_sb
.s_resgid
)) &&
387 !capable(CAP_SYS_RESOURCE
))) {
392 ext2_debug ("goal=%lu.\n", goal
);
396 * First, test whether the goal block is free.
398 if (goal
< le32_to_cpu(es
->s_first_data_block
) ||
399 goal
>= le32_to_cpu(es
->s_blocks_count
))
400 goal
= le32_to_cpu(es
->s_first_data_block
);
401 i
= (goal
- le32_to_cpu(es
->s_first_data_block
)) / EXT2_BLOCKS_PER_GROUP(sb
);
402 gdp
= ext2_get_group_desc (sb
, i
, &bh2
);
406 if (le16_to_cpu(gdp
->bg_free_blocks_count
) > 0) {
407 j
= ((goal
- le32_to_cpu(es
->s_first_data_block
)) % EXT2_BLOCKS_PER_GROUP(sb
));
412 bitmap_nr
= load_block_bitmap (sb
, i
);
416 bh
= sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
];
418 ext2_debug ("goal is at %d:%d.\n", i
, j
);
420 if (!ext2_test_bit(j
, bh
->b_data
)) {
423 ext2_debug ("goal bit allocated.\n");
429 * The goal was occupied; search forward for a free
430 * block within the next XX blocks.
432 * end_goal is more or less random, but it has to be
433 * less than EXT2_BLOCKS_PER_GROUP. Aligning up to the
434 * next 64-bit boundary is simple..
436 int end_goal
= (j
+ 63) & ~63;
437 j
= ext2_find_next_zero_bit(bh
->b_data
, end_goal
, j
);
442 ext2_debug ("Bit not found near goal\n");
445 * There has been no free block found in the near vicinity
446 * of the goal: do a search forward through the block groups,
447 * searching in each group first for an entire free byte in
448 * the bitmap and then for any free bit.
450 * Search first in the remainder of the current group; then,
451 * cyclicly search through the rest of the groups.
453 p
= ((char *) bh
->b_data
) + (j
>> 3);
454 r
= memscan(p
, 0, (EXT2_BLOCKS_PER_GROUP(sb
) - j
+ 7) >> 3);
455 k
= (r
- ((char *) bh
->b_data
)) << 3;
456 if (k
< EXT2_BLOCKS_PER_GROUP(sb
)) {
461 k
= ext2_find_next_zero_bit ((unsigned long *) bh
->b_data
,
462 EXT2_BLOCKS_PER_GROUP(sb
),
464 if (k
< EXT2_BLOCKS_PER_GROUP(sb
)) {
470 ext2_debug ("Bit not found in block group %d.\n", i
);
473 * Now search the rest of the groups. We assume that
474 * i and gdp correctly point to the last group visited.
476 for (k
= 0; k
< sb
->u
.ext2_sb
.s_groups_count
; k
++) {
478 if (i
>= sb
->u
.ext2_sb
.s_groups_count
)
480 gdp
= ext2_get_group_desc (sb
, i
, &bh2
);
486 if (le16_to_cpu(gdp
->bg_free_blocks_count
) > 0)
489 if (k
>= sb
->u
.ext2_sb
.s_groups_count
) {
493 bitmap_nr
= load_block_bitmap (sb
, i
);
497 bh
= sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
];
498 r
= memscan(bh
->b_data
, 0, EXT2_BLOCKS_PER_GROUP(sb
) >> 3);
499 j
= (r
- bh
->b_data
) << 3;
500 if (j
< EXT2_BLOCKS_PER_GROUP(sb
))
503 j
= ext2_find_first_zero_bit ((unsigned long *) bh
->b_data
,
504 EXT2_BLOCKS_PER_GROUP(sb
));
505 if (j
>= EXT2_BLOCKS_PER_GROUP(sb
)) {
506 ext2_error (sb
, "ext2_new_block",
507 "Free blocks count corrupted for block group %d", i
);
514 * We have succeeded in finding a free byte in the block
515 * bitmap. Now search backwards up to 7 bits to find the
516 * start of this group of free blocks.
518 for (k
= 0; k
< 7 && j
> 0 && !ext2_test_bit (j
- 1, bh
->b_data
); k
++, j
--);
522 ext2_debug ("using block group %d(%d)\n", i
, gdp
->bg_free_blocks_count
);
525 * Check quota for allocation of this block.
527 if(DQUOT_ALLOC_BLOCK(sb
, inode
, 1)) {
533 tmp
= j
+ i
* EXT2_BLOCKS_PER_GROUP(sb
) + le32_to_cpu(es
->s_first_data_block
);
535 if (test_opt (sb
, CHECK_STRICT
) &&
536 (tmp
== le32_to_cpu(gdp
->bg_block_bitmap
) ||
537 tmp
== le32_to_cpu(gdp
->bg_inode_bitmap
) ||
538 in_range (tmp
, le32_to_cpu(gdp
->bg_inode_table
), sb
->u
.ext2_sb
.s_itb_per_group
)))
539 ext2_panic (sb
, "ext2_new_block",
540 "Allocating block in system zone - "
543 if (ext2_set_bit (j
, bh
->b_data
)) {
544 ext2_warning (sb
, "ext2_new_block",
545 "bit already set for block %d", j
);
546 DQUOT_FREE_BLOCK(sb
, inode
, 1);
550 ext2_debug ("found bit %d\n", j
);
553 * Do block preallocation now if required.
555 #ifdef EXT2_PREALLOCATE
556 if (prealloc_block
) {
559 prealloc_goal
= es
->s_prealloc_blocks
?
560 es
->s_prealloc_blocks
: EXT2_DEFAULT_PREALLOC_BLOCKS
;
563 *prealloc_block
= tmp
+ 1;
565 k
< prealloc_goal
&& (j
+ k
) < EXT2_BLOCKS_PER_GROUP(sb
);
567 if (DQUOT_PREALLOC_BLOCK(sb
, inode
, 1))
569 if (ext2_set_bit (j
+ k
, bh
->b_data
)) {
570 DQUOT_FREE_BLOCK(sb
, inode
, 1);
575 gdp
->bg_free_blocks_count
=
576 cpu_to_le16(le16_to_cpu(gdp
->bg_free_blocks_count
) -
578 es
->s_free_blocks_count
=
579 cpu_to_le32(le32_to_cpu(es
->s_free_blocks_count
) -
581 ext2_debug ("Preallocated a further %lu bits.\n",
588 mark_buffer_dirty(bh
, 1);
589 if (sb
->s_flags
& MS_SYNCHRONOUS
) {
590 ll_rw_block (WRITE
, 1, &bh
);
594 if (j
>= le32_to_cpu(es
->s_blocks_count
)) {
595 ext2_error (sb
, "ext2_new_block",
596 "block >= blocks count - "
597 "block_group = %d, block=%d", i
, j
);
601 if (!(bh
= getblk (sb
->s_dev
, j
, sb
->s_blocksize
))) {
602 ext2_error (sb
, "ext2_new_block", "cannot get block %d", j
);
606 memset(bh
->b_data
, 0, sb
->s_blocksize
);
607 mark_buffer_uptodate(bh
, 1);
608 mark_buffer_dirty(bh
, 1);
611 ext2_debug ("allocating block %d. "
612 "Goal hits %d of %d.\n", j
, goal_hits
, goal_attempts
);
614 gdp
->bg_free_blocks_count
= cpu_to_le16(le16_to_cpu(gdp
->bg_free_blocks_count
) - 1);
615 mark_buffer_dirty(bh2
, 1);
616 es
->s_free_blocks_count
= cpu_to_le32(le32_to_cpu(es
->s_free_blocks_count
) - 1);
617 mark_buffer_dirty(sb
->u
.ext2_sb
.s_sbh
, 1);
630 unsigned long ext2_count_free_blocks (struct super_block
* sb
)
633 struct ext2_super_block
* es
;
634 unsigned long desc_count
, bitmap_count
, x
;
636 struct ext2_group_desc
* gdp
;
640 es
= sb
->u
.ext2_sb
.s_es
;
644 for (i
= 0; i
< sb
->u
.ext2_sb
.s_groups_count
; i
++) {
645 gdp
= ext2_get_group_desc (sb
, i
, NULL
);
648 desc_count
+= le16_to_cpu(gdp
->bg_free_blocks_count
);
649 bitmap_nr
= load_block_bitmap (sb
, i
);
653 x
= ext2_count_free (sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
],
655 printk ("group %d: stored = %d, counted = %lu\n",
656 i
, le16_to_cpu(gdp
->bg_free_blocks_count
), x
);
659 printk("ext2_count_free_blocks: stored = %lu, computed = %lu, %lu\n",
660 le32_to_cpu(es
->s_free_blocks_count
), desc_count
, bitmap_count
);
664 return le32_to_cpu(sb
->u
.ext2_sb
.s_es
->s_free_blocks_count
);
668 static inline int block_in_use (unsigned long block
,
669 struct super_block
* sb
,
672 return ext2_test_bit ((block
- le32_to_cpu(sb
->u
.ext2_sb
.s_es
->s_first_data_block
)) %
673 EXT2_BLOCKS_PER_GROUP(sb
), map
);
/*
 * test_root - return 1 if @a is zero, one, or an exact power of @b,
 * 0 otherwise.  Used by the sparse-superblock check: backup superblocks
 * live only in groups 0, 1 and powers of 3, 5 and 7.
 */
static int test_root(int a, int b)
{
	if (a == 0)
		return 1;
	while (1) {
		if (a == 1)
			return 1;
		if (a % b)
			return 0;
		a = a / b;
	}
}
689 void ext2_check_blocks_bitmap (struct super_block
* sb
)
691 struct buffer_head
* bh
;
692 struct ext2_super_block
* es
;
693 unsigned long desc_count
, bitmap_count
, x
;
694 unsigned long desc_blocks
;
696 struct ext2_group_desc
* gdp
;
700 es
= sb
->u
.ext2_sb
.s_es
;
704 desc_blocks
= (sb
->u
.ext2_sb
.s_groups_count
+ EXT2_DESC_PER_BLOCK(sb
) - 1) /
705 EXT2_DESC_PER_BLOCK(sb
);
706 for (i
= 0; i
< sb
->u
.ext2_sb
.s_groups_count
; i
++) {
707 gdp
= ext2_get_group_desc (sb
, i
, NULL
);
710 desc_count
+= le16_to_cpu(gdp
->bg_free_blocks_count
);
711 bitmap_nr
= load_block_bitmap (sb
, i
);
715 bh
= sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
];
717 if (!(le32_to_cpu(sb
->u
.ext2_sb
.s_feature_ro_compat
) &
718 EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER
) ||
719 (test_root(i
, 3) || test_root(i
, 5) || test_root(i
, 7))) {
720 if (!ext2_test_bit (0, bh
->b_data
))
721 ext2_error (sb
, "ext2_check_blocks_bitmap",
722 "Superblock in group %d "
723 "is marked free", i
);
725 for (j
= 0; j
< desc_blocks
; j
++)
726 if (!ext2_test_bit (j
+ 1, bh
->b_data
))
728 "ext2_check_blocks_bitmap",
729 "Descriptor block #%d in group "
730 "%d is marked free", j
, i
);
733 if (!block_in_use (le32_to_cpu(gdp
->bg_block_bitmap
), sb
, bh
->b_data
))
734 ext2_error (sb
, "ext2_check_blocks_bitmap",
735 "Block bitmap for group %d is marked free",
738 if (!block_in_use (le32_to_cpu(gdp
->bg_inode_bitmap
), sb
, bh
->b_data
))
739 ext2_error (sb
, "ext2_check_blocks_bitmap",
740 "Inode bitmap for group %d is marked free",
743 for (j
= 0; j
< sb
->u
.ext2_sb
.s_itb_per_group
; j
++)
744 if (!block_in_use (le32_to_cpu(gdp
->bg_inode_table
) + j
, sb
, bh
->b_data
))
745 ext2_error (sb
, "ext2_check_blocks_bitmap",
746 "Block #%d of the inode table in "
747 "group %d is marked free", j
, i
);
749 x
= ext2_count_free (bh
, sb
->s_blocksize
);
750 if (le16_to_cpu(gdp
->bg_free_blocks_count
) != x
)
751 ext2_error (sb
, "ext2_check_blocks_bitmap",
752 "Wrong free blocks count for group %d, "
753 "stored = %d, counted = %lu", i
,
754 le16_to_cpu(gdp
->bg_free_blocks_count
), x
);
757 if (le32_to_cpu(es
->s_free_blocks_count
) != bitmap_count
)
758 ext2_error (sb
, "ext2_check_blocks_bitmap",
759 "Wrong free blocks count in super block, "
760 "stored = %lu, counted = %lu",
761 (unsigned long) le32_to_cpu(es
->s_free_blocks_count
), bitmap_count
);