/*
 *  linux/fs/ext2/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * balloc.c contains the blocks allocation and deallocation routines
 *
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext2_read_super).
 */
30 #include <linux/ext2_fs.h>
31 #include <linux/stat.h>
32 #include <linux/sched.h>
33 #include <linux/string.h>
34 #include <linux/locks.h>
35 #include <linux/quotaops.h>
37 #include <asm/bitops.h>
38 #include <asm/byteorder.h>
/*
 * True if block b lies in [first, first + len).  Written half-open instead
 * of the closed form "(b) <= (first) + (len) - 1", which underflows for
 * unsigned arguments when first + len == 0 (e.g. in_range(x, 0, 0) would be
 * true for every x).  Note: arguments are evaluated more than once.
 */
#define in_range(b, first, len)	((b) >= (first) && (b) < (first) + (len))
42 struct ext2_group_desc
* ext2_get_group_desc(struct super_block
* sb
,
43 unsigned int block_group
,
44 struct buffer_head
** bh
)
46 unsigned long group_desc
;
48 struct ext2_group_desc
* gdp
;
50 if (block_group
>= sb
->u
.ext2_sb
.s_groups_count
) {
51 ext2_error (sb
, "ext2_get_group_desc",
52 "block_group >= groups_count - "
53 "block_group = %d, groups_count = %lu",
54 block_group
, sb
->u
.ext2_sb
.s_groups_count
);
59 group_desc
= block_group
/ EXT2_DESC_PER_BLOCK(sb
);
60 desc
= block_group
% EXT2_DESC_PER_BLOCK(sb
);
61 if (!sb
->u
.ext2_sb
.s_group_desc
[group_desc
]) {
62 ext2_error (sb
, "ext2_get_group_desc",
63 "Group descriptor not loaded - "
64 "block_group = %d, group_desc = %lu, desc = %lu",
65 block_group
, group_desc
, desc
);
69 gdp
= (struct ext2_group_desc
*)
70 sb
->u
.ext2_sb
.s_group_desc
[group_desc
]->b_data
;
72 *bh
= sb
->u
.ext2_sb
.s_group_desc
[group_desc
];
77 * Read the bitmap for a given block_group, reading into the specified
78 * slot in the superblock's bitmap cache.
80 * Return >=0 on success or a -ve error code.
83 static int read_block_bitmap (struct super_block
* sb
,
84 unsigned int block_group
,
85 unsigned long bitmap_nr
)
87 struct ext2_group_desc
* gdp
;
88 struct buffer_head
* bh
= NULL
;
91 gdp
= ext2_get_group_desc (sb
, block_group
, NULL
);
95 bh
= bread (sb
->s_dev
, le32_to_cpu(gdp
->bg_block_bitmap
), sb
->s_blocksize
);
97 ext2_error (sb
, "read_block_bitmap",
98 "Cannot read block bitmap - "
99 "block_group = %d, block_bitmap = %lu",
100 block_group
, (unsigned long) gdp
->bg_block_bitmap
);
104 * On IO error, just leave a zero in the superblock's block pointer for
105 * this group. The IO will be retried next time.
108 sb
->u
.ext2_sb
.s_block_bitmap_number
[bitmap_nr
] = block_group
;
109 sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
] = bh
;
114 * load_block_bitmap loads the block bitmap for a blocks group
116 * It maintains a cache for the last bitmaps loaded. This cache is managed
117 * with a LRU algorithm.
120 * 1/ There is one cache per mounted file system.
121 * 2/ If the file system contains less than EXT2_MAX_GROUP_LOADED groups,
122 * this function reads the bitmap without maintaining a LRU cache.
124 * Return the slot used to store the bitmap, or a -ve error code.
126 static int load__block_bitmap (struct super_block
* sb
,
127 unsigned int block_group
)
129 int i
, j
, retval
= 0;
130 unsigned long block_bitmap_number
;
131 struct buffer_head
* block_bitmap
;
133 if (block_group
>= sb
->u
.ext2_sb
.s_groups_count
)
134 ext2_panic (sb
, "load_block_bitmap",
135 "block_group >= groups_count - "
136 "block_group = %d, groups_count = %lu",
137 block_group
, sb
->u
.ext2_sb
.s_groups_count
);
139 if (sb
->u
.ext2_sb
.s_groups_count
<= EXT2_MAX_GROUP_LOADED
) {
140 if (sb
->u
.ext2_sb
.s_block_bitmap
[block_group
]) {
141 if (sb
->u
.ext2_sb
.s_block_bitmap_number
[block_group
] ==
144 ext2_error (sb
, "load_block_bitmap",
145 "block_group != block_bitmap_number");
147 retval
= read_block_bitmap (sb
, block_group
, block_group
);
153 for (i
= 0; i
< sb
->u
.ext2_sb
.s_loaded_block_bitmaps
&&
154 sb
->u
.ext2_sb
.s_block_bitmap_number
[i
] != block_group
; i
++)
156 if (i
< sb
->u
.ext2_sb
.s_loaded_block_bitmaps
&&
157 sb
->u
.ext2_sb
.s_block_bitmap_number
[i
] == block_group
) {
158 block_bitmap_number
= sb
->u
.ext2_sb
.s_block_bitmap_number
[i
];
159 block_bitmap
= sb
->u
.ext2_sb
.s_block_bitmap
[i
];
160 for (j
= i
; j
> 0; j
--) {
161 sb
->u
.ext2_sb
.s_block_bitmap_number
[j
] =
162 sb
->u
.ext2_sb
.s_block_bitmap_number
[j
- 1];
163 sb
->u
.ext2_sb
.s_block_bitmap
[j
] =
164 sb
->u
.ext2_sb
.s_block_bitmap
[j
- 1];
166 sb
->u
.ext2_sb
.s_block_bitmap_number
[0] = block_bitmap_number
;
167 sb
->u
.ext2_sb
.s_block_bitmap
[0] = block_bitmap
;
170 * There's still one special case here --- if block_bitmap == 0
171 * then our last attempt to read the bitmap failed and we have
172 * just ended up caching that failure. Try again to read it.
175 retval
= read_block_bitmap (sb
, block_group
, 0);
177 if (sb
->u
.ext2_sb
.s_loaded_block_bitmaps
< EXT2_MAX_GROUP_LOADED
)
178 sb
->u
.ext2_sb
.s_loaded_block_bitmaps
++;
180 brelse (sb
->u
.ext2_sb
.s_block_bitmap
[EXT2_MAX_GROUP_LOADED
- 1]);
181 for (j
= sb
->u
.ext2_sb
.s_loaded_block_bitmaps
- 1; j
> 0; j
--) {
182 sb
->u
.ext2_sb
.s_block_bitmap_number
[j
] =
183 sb
->u
.ext2_sb
.s_block_bitmap_number
[j
- 1];
184 sb
->u
.ext2_sb
.s_block_bitmap
[j
] =
185 sb
->u
.ext2_sb
.s_block_bitmap
[j
- 1];
187 retval
= read_block_bitmap (sb
, block_group
, 0);
193 * Load the block bitmap for a given block group. First of all do a couple
194 * of fast lookups for common cases and then pass the request onto the guts
195 * of the bitmap loader.
197 * Return the slot number of the group in the superblock bitmap cache's on
198 * success, or a -ve error code.
200 * There is still one inconsistancy here --- if the number of groups in this
201 * filesystems is <= EXT2_MAX_GROUP_LOADED, then we have no way of
202 * differentiating between a group for which we have never performed a bitmap
203 * IO request, and a group for which the last bitmap read request failed.
205 static inline int load_block_bitmap (struct super_block
* sb
,
206 unsigned int block_group
)
211 * Do the lookup for the slot. First of all, check if we're asking
212 * for the same slot as last time, and did we succeed that last time?
214 if (sb
->u
.ext2_sb
.s_loaded_block_bitmaps
> 0 &&
215 sb
->u
.ext2_sb
.s_block_bitmap_number
[0] == block_group
&&
216 sb
->u
.ext2_sb
.s_block_bitmap
[block_group
]) {
220 * Or can we do a fast lookup based on a loaded group on a filesystem
221 * small enough to be mapped directly into the superblock?
223 else if (sb
->u
.ext2_sb
.s_groups_count
<= EXT2_MAX_GROUP_LOADED
&&
224 sb
->u
.ext2_sb
.s_block_bitmap_number
[block_group
] == block_group
&&
225 sb
->u
.ext2_sb
.s_block_bitmap
[block_group
]) {
229 * If not, then do a full lookup for this block group.
232 slot
= load__block_bitmap (sb
, block_group
);
236 * <0 means we just got an error
242 * If it's a valid slot, we may still have cached a previous IO error,
243 * in which case the bh in the superblock cache will be zero.
245 if (!sb
->u
.ext2_sb
.s_block_bitmap
[slot
])
249 * Must have been read in OK to get this far.
254 void ext2_free_blocks (const struct inode
* inode
, unsigned long block
,
257 struct buffer_head
* bh
;
258 struct buffer_head
* bh2
;
259 unsigned long block_group
;
263 unsigned long overflow
;
264 struct super_block
* sb
;
265 struct ext2_group_desc
* gdp
;
266 struct ext2_super_block
* es
;
270 printk ("ext2_free_blocks: nonexistent device");
274 es
= sb
->u
.ext2_sb
.s_es
;
275 if (block
< le32_to_cpu(es
->s_first_data_block
) ||
276 (block
+ count
) > le32_to_cpu(es
->s_blocks_count
)) {
277 ext2_error (sb
, "ext2_free_blocks",
278 "Freeing blocks not in datazone - "
279 "block = %lu, count = %lu", block
, count
);
283 ext2_debug ("freeing block %lu\n", block
);
287 block_group
= (block
- le32_to_cpu(es
->s_first_data_block
)) /
288 EXT2_BLOCKS_PER_GROUP(sb
);
289 bit
= (block
- le32_to_cpu(es
->s_first_data_block
)) %
290 EXT2_BLOCKS_PER_GROUP(sb
);
292 * Check to see if we are freeing blocks across a group
295 if (bit
+ count
> EXT2_BLOCKS_PER_GROUP(sb
)) {
296 overflow
= bit
+ count
- EXT2_BLOCKS_PER_GROUP(sb
);
299 bitmap_nr
= load_block_bitmap (sb
, block_group
);
303 bh
= sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
];
304 gdp
= ext2_get_group_desc (sb
, block_group
, &bh2
);
308 if (test_opt (sb
, CHECK_STRICT
) &&
309 (in_range (le32_to_cpu(gdp
->bg_block_bitmap
), block
, count
) ||
310 in_range (le32_to_cpu(gdp
->bg_inode_bitmap
), block
, count
) ||
311 in_range (block
, le32_to_cpu(gdp
->bg_inode_table
),
312 sb
->u
.ext2_sb
.s_itb_per_group
) ||
313 in_range (block
+ count
- 1, le32_to_cpu(gdp
->bg_inode_table
),
314 sb
->u
.ext2_sb
.s_itb_per_group
)))
315 ext2_panic (sb
, "ext2_free_blocks",
316 "Freeing blocks in system zones - "
317 "Block = %lu, count = %lu",
320 for (i
= 0; i
< count
; i
++) {
321 if (!ext2_clear_bit (bit
+ i
, bh
->b_data
))
322 ext2_warning (sb
, "ext2_free_blocks",
323 "bit already cleared for block %lu",
326 DQUOT_FREE_BLOCK(sb
, inode
, 1);
327 gdp
->bg_free_blocks_count
=
328 cpu_to_le16(le16_to_cpu(gdp
->bg_free_blocks_count
)+1);
329 es
->s_free_blocks_count
=
330 cpu_to_le32(le32_to_cpu(es
->s_free_blocks_count
)+1);
334 mark_buffer_dirty(bh2
, 1);
335 mark_buffer_dirty(sb
->u
.ext2_sb
.s_sbh
, 1);
337 mark_buffer_dirty(bh
, 1);
338 if (sb
->s_flags
& MS_SYNCHRONOUS
) {
339 ll_rw_block (WRITE
, 1, &bh
);
354 * ext2_new_block uses a goal block to assist allocation. If the goal is
355 * free, or there is a free block within 32 blocks of the goal, that block
356 * is allocated. Otherwise a forward search is made for a free block; within
357 * each block group the search first looks for an entire free byte in the block
358 * bitmap, and then for any free bit if that fails.
360 int ext2_new_block (const struct inode
* inode
, unsigned long goal
,
361 u32
* prealloc_count
, u32
* prealloc_block
, int * err
)
363 struct buffer_head
* bh
;
364 struct buffer_head
* bh2
;
368 struct super_block
* sb
;
369 struct ext2_group_desc
* gdp
;
370 struct ext2_super_block
* es
;
374 static int goal_hits
= 0, goal_attempts
= 0;
378 printk ("ext2_new_block: nonexistent device");
383 es
= sb
->u
.ext2_sb
.s_es
;
384 if (le32_to_cpu(es
->s_free_blocks_count
) <= le32_to_cpu(es
->s_r_blocks_count
) &&
385 ((sb
->u
.ext2_sb
.s_resuid
!= current
->fsuid
) &&
386 (sb
->u
.ext2_sb
.s_resgid
== 0 ||
387 !in_group_p (sb
->u
.ext2_sb
.s_resgid
)) &&
388 !capable(CAP_SYS_RESOURCE
))) {
393 ext2_debug ("goal=%lu.\n", goal
);
397 * First, test whether the goal block is free.
399 if (goal
< le32_to_cpu(es
->s_first_data_block
) ||
400 goal
>= le32_to_cpu(es
->s_blocks_count
))
401 goal
= le32_to_cpu(es
->s_first_data_block
);
402 i
= (goal
- le32_to_cpu(es
->s_first_data_block
)) / EXT2_BLOCKS_PER_GROUP(sb
);
403 gdp
= ext2_get_group_desc (sb
, i
, &bh2
);
407 if (le16_to_cpu(gdp
->bg_free_blocks_count
) > 0) {
408 j
= ((goal
- le32_to_cpu(es
->s_first_data_block
)) % EXT2_BLOCKS_PER_GROUP(sb
));
413 bitmap_nr
= load_block_bitmap (sb
, i
);
417 bh
= sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
];
419 ext2_debug ("goal is at %d:%d.\n", i
, j
);
421 if (!ext2_test_bit(j
, bh
->b_data
)) {
424 ext2_debug ("goal bit allocated.\n");
430 * The goal was occupied; search forward for a free
431 * block within the next XX blocks.
433 * end_goal is more or less random, but it has to be
434 * less than EXT2_BLOCKS_PER_GROUP. Aligning up to the
435 * next 64-bit boundary is simple..
437 int end_goal
= (j
+ 63) & ~63;
438 j
= ext2_find_next_zero_bit(bh
->b_data
, end_goal
, j
);
443 ext2_debug ("Bit not found near goal\n");
446 * There has been no free block found in the near vicinity
447 * of the goal: do a search forward through the block groups,
448 * searching in each group first for an entire free byte in
449 * the bitmap and then for any free bit.
451 * Search first in the remainder of the current group; then,
452 * cyclicly search through the rest of the groups.
454 p
= ((char *) bh
->b_data
) + (j
>> 3);
455 r
= memscan(p
, 0, (EXT2_BLOCKS_PER_GROUP(sb
) - j
+ 7) >> 3);
456 k
= (r
- ((char *) bh
->b_data
)) << 3;
457 if (k
< EXT2_BLOCKS_PER_GROUP(sb
)) {
462 k
= ext2_find_next_zero_bit ((unsigned long *) bh
->b_data
,
463 EXT2_BLOCKS_PER_GROUP(sb
),
465 if (k
< EXT2_BLOCKS_PER_GROUP(sb
)) {
471 ext2_debug ("Bit not found in block group %d.\n", i
);
474 * Now search the rest of the groups. We assume that
475 * i and gdp correctly point to the last group visited.
477 for (k
= 0; k
< sb
->u
.ext2_sb
.s_groups_count
; k
++) {
479 if (i
>= sb
->u
.ext2_sb
.s_groups_count
)
481 gdp
= ext2_get_group_desc (sb
, i
, &bh2
);
487 if (le16_to_cpu(gdp
->bg_free_blocks_count
) > 0)
490 if (k
>= sb
->u
.ext2_sb
.s_groups_count
) {
494 bitmap_nr
= load_block_bitmap (sb
, i
);
498 bh
= sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
];
499 r
= memscan(bh
->b_data
, 0, EXT2_BLOCKS_PER_GROUP(sb
) >> 3);
500 j
= (r
- bh
->b_data
) << 3;
501 if (j
< EXT2_BLOCKS_PER_GROUP(sb
))
504 j
= ext2_find_first_zero_bit ((unsigned long *) bh
->b_data
,
505 EXT2_BLOCKS_PER_GROUP(sb
));
506 if (j
>= EXT2_BLOCKS_PER_GROUP(sb
)) {
507 ext2_error (sb
, "ext2_new_block",
508 "Free blocks count corrupted for block group %d", i
);
515 * We have succeeded in finding a free byte in the block
516 * bitmap. Now search backwards up to 7 bits to find the
517 * start of this group of free blocks.
519 for (k
= 0; k
< 7 && j
> 0 && !ext2_test_bit (j
- 1, bh
->b_data
); k
++, j
--);
523 ext2_debug ("using block group %d(%d)\n", i
, gdp
->bg_free_blocks_count
);
526 * Check quota for allocation of this block.
528 if(DQUOT_ALLOC_BLOCK(sb
, inode
, 1)) {
534 tmp
= j
+ i
* EXT2_BLOCKS_PER_GROUP(sb
) + le32_to_cpu(es
->s_first_data_block
);
536 if (test_opt (sb
, CHECK_STRICT
) &&
537 (tmp
== le32_to_cpu(gdp
->bg_block_bitmap
) ||
538 tmp
== le32_to_cpu(gdp
->bg_inode_bitmap
) ||
539 in_range (tmp
, le32_to_cpu(gdp
->bg_inode_table
), sb
->u
.ext2_sb
.s_itb_per_group
)))
540 ext2_panic (sb
, "ext2_new_block",
541 "Allocating block in system zone - "
544 if (ext2_set_bit (j
, bh
->b_data
)) {
545 ext2_warning (sb
, "ext2_new_block",
546 "bit already set for block %d", j
);
547 DQUOT_FREE_BLOCK(sb
, inode
, 1);
551 ext2_debug ("found bit %d\n", j
);
554 * Do block preallocation now if required.
556 #ifdef EXT2_PREALLOCATE
557 if (prealloc_block
) {
560 prealloc_goal
= es
->s_prealloc_blocks
?
561 es
->s_prealloc_blocks
: EXT2_DEFAULT_PREALLOC_BLOCKS
;
564 *prealloc_block
= tmp
+ 1;
566 k
< prealloc_goal
&& (j
+ k
) < EXT2_BLOCKS_PER_GROUP(sb
);
568 if (DQUOT_PREALLOC_BLOCK(sb
, inode
, 1))
570 if (ext2_set_bit (j
+ k
, bh
->b_data
)) {
571 DQUOT_FREE_BLOCK(sb
, inode
, 1);
576 gdp
->bg_free_blocks_count
=
577 cpu_to_le16(le16_to_cpu(gdp
->bg_free_blocks_count
) -
579 es
->s_free_blocks_count
=
580 cpu_to_le32(le32_to_cpu(es
->s_free_blocks_count
) -
582 ext2_debug ("Preallocated a further %lu bits.\n",
589 mark_buffer_dirty(bh
, 1);
590 if (sb
->s_flags
& MS_SYNCHRONOUS
) {
591 ll_rw_block (WRITE
, 1, &bh
);
595 if (j
>= le32_to_cpu(es
->s_blocks_count
)) {
596 ext2_error (sb
, "ext2_new_block",
597 "block >= blocks count - "
598 "block_group = %d, block=%d", i
, j
);
602 if (!(bh
= getblk (sb
->s_dev
, j
, sb
->s_blocksize
))) {
603 ext2_error (sb
, "ext2_new_block", "cannot get block %d", j
);
607 memset(bh
->b_data
, 0, sb
->s_blocksize
);
608 mark_buffer_uptodate(bh
, 1);
609 mark_buffer_dirty(bh
, 1);
612 ext2_debug ("allocating block %d. "
613 "Goal hits %d of %d.\n", j
, goal_hits
, goal_attempts
);
615 gdp
->bg_free_blocks_count
= cpu_to_le16(le16_to_cpu(gdp
->bg_free_blocks_count
) - 1);
616 mark_buffer_dirty(bh2
, 1);
617 es
->s_free_blocks_count
= cpu_to_le32(le32_to_cpu(es
->s_free_blocks_count
) - 1);
618 mark_buffer_dirty(sb
->u
.ext2_sb
.s_sbh
, 1);
631 unsigned long ext2_count_free_blocks (struct super_block
* sb
)
634 struct ext2_super_block
* es
;
635 unsigned long desc_count
, bitmap_count
, x
;
637 struct ext2_group_desc
* gdp
;
641 es
= sb
->u
.ext2_sb
.s_es
;
645 for (i
= 0; i
< sb
->u
.ext2_sb
.s_groups_count
; i
++) {
646 gdp
= ext2_get_group_desc (sb
, i
, NULL
);
649 desc_count
+= le16_to_cpu(gdp
->bg_free_blocks_count
);
650 bitmap_nr
= load_block_bitmap (sb
, i
);
654 x
= ext2_count_free (sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
],
656 printk ("group %d: stored = %d, counted = %lu\n",
657 i
, le16_to_cpu(gdp
->bg_free_blocks_count
), x
);
660 printk("ext2_count_free_blocks: stored = %lu, computed = %lu, %lu\n",
661 le32_to_cpu(es
->s_free_blocks_count
), desc_count
, bitmap_count
);
665 return le32_to_cpu(sb
->u
.ext2_sb
.s_es
->s_free_blocks_count
);
669 static inline int block_in_use (unsigned long block
,
670 struct super_block
* sb
,
673 return ext2_test_bit ((block
- le32_to_cpu(sb
->u
.ext2_sb
.s_es
->s_first_data_block
)) %
674 EXT2_BLOCKS_PER_GROUP(sb
), map
);
/*
 * test_root - return 1 if a is 0, 1, or an exact power of b; 0 otherwise.
 * Used by ext2_group_sparse to decide which groups carry superblock
 * backups under the sparse_super feature.  (The body was missing from the
 * extracted source; restored to the canonical repeated-division check.)
 */
static int test_root(int a, int b)
{
	if (a == 0)
		return 1;
	while (1) {
		if (a == 1)
			return 1;
		if (a % b)
			return 0;
		a = a / b;
	}
}
/*
 * ext2_group_sparse - under the sparse_super feature, return nonzero for
 * the groups that hold superblock/descriptor backups: group 0, 1, and
 * powers of 3, 5, and 7 (via test_root).
 */
int ext2_group_sparse(int group)
{
	return (test_root(group, 3) || test_root(group, 5) ||
		test_root(group, 7));
}
696 void ext2_check_blocks_bitmap (struct super_block
* sb
)
698 struct buffer_head
* bh
;
699 struct ext2_super_block
* es
;
700 unsigned long desc_count
, bitmap_count
, x
;
701 unsigned long desc_blocks
;
703 struct ext2_group_desc
* gdp
;
707 es
= sb
->u
.ext2_sb
.s_es
;
711 desc_blocks
= (sb
->u
.ext2_sb
.s_groups_count
+ EXT2_DESC_PER_BLOCK(sb
) - 1) /
712 EXT2_DESC_PER_BLOCK(sb
);
713 for (i
= 0; i
< sb
->u
.ext2_sb
.s_groups_count
; i
++) {
714 gdp
= ext2_get_group_desc (sb
, i
, NULL
);
717 desc_count
+= le16_to_cpu(gdp
->bg_free_blocks_count
);
718 bitmap_nr
= load_block_bitmap (sb
, i
);
722 bh
= sb
->u
.ext2_sb
.s_block_bitmap
[bitmap_nr
];
724 if (!(sb
->u
.ext2_sb
.s_feature_ro_compat
&
725 EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER
) ||
726 ext2_group_sparse(i
)) {
727 if (!ext2_test_bit (0, bh
->b_data
))
728 ext2_error (sb
, "ext2_check_blocks_bitmap",
729 "Superblock in group %d "
730 "is marked free", i
);
732 for (j
= 0; j
< desc_blocks
; j
++)
733 if (!ext2_test_bit (j
+ 1, bh
->b_data
))
735 "ext2_check_blocks_bitmap",
736 "Descriptor block #%d in group "
737 "%d is marked free", j
, i
);
740 if (!block_in_use (le32_to_cpu(gdp
->bg_block_bitmap
), sb
, bh
->b_data
))
741 ext2_error (sb
, "ext2_check_blocks_bitmap",
742 "Block bitmap for group %d is marked free",
745 if (!block_in_use (le32_to_cpu(gdp
->bg_inode_bitmap
), sb
, bh
->b_data
))
746 ext2_error (sb
, "ext2_check_blocks_bitmap",
747 "Inode bitmap for group %d is marked free",
750 for (j
= 0; j
< sb
->u
.ext2_sb
.s_itb_per_group
; j
++)
751 if (!block_in_use (le32_to_cpu(gdp
->bg_inode_table
) + j
, sb
, bh
->b_data
))
752 ext2_error (sb
, "ext2_check_blocks_bitmap",
753 "Block #%d of the inode table in "
754 "group %d is marked free", j
, i
);
756 x
= ext2_count_free (bh
, sb
->s_blocksize
);
757 if (le16_to_cpu(gdp
->bg_free_blocks_count
) != x
)
758 ext2_error (sb
, "ext2_check_blocks_bitmap",
759 "Wrong free blocks count for group %d, "
760 "stored = %d, counted = %lu", i
,
761 le16_to_cpu(gdp
->bg_free_blocks_count
), x
);
764 if (le32_to_cpu(es
->s_free_blocks_count
) != bitmap_count
)
765 ext2_error (sb
, "ext2_check_blocks_bitmap",
766 "Wrong free blocks count in super block, "
767 "stored = %lu, counted = %lu",
768 (unsigned long) le32_to_cpu(es
->s_free_blocks_count
), bitmap_count
);