/*
 *  linux/fs/ext3/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/config.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
/*
 * balloc.c contains the block allocation and deallocation routines.
 */

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext3_read_super).
 */
#define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
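/*
 * Editorial illustration (not in the original source): in_range() tests
 * membership in the closed interval [first, first + len - 1].  A minimal
 * sketch with hypothetical values:
 */
#if 0
        in_range( 99, 100, 8);  /* 0 - just below the range        */
        in_range(100, 100, 8);  /* 1 - first block in 100..107     */
        in_range(107, 100, 8);  /* 1 - last block in 100..107      */
        in_range(108, 100, 8);  /* 0 - one past the end            */
#endif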
struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
                                             unsigned int block_group,
                                             struct buffer_head ** bh)
{
        unsigned long group_desc;
        unsigned long desc;
        struct ext3_group_desc * gdp;

        if (block_group >= EXT3_SB(sb)->s_groups_count) {
                ext3_error (sb, "ext3_get_group_desc",
                            "block_group >= groups_count - "
                            "block_group = %d, groups_count = %lu",
                            block_group, EXT3_SB(sb)->s_groups_count);

                return NULL;
        }

        group_desc = block_group / EXT3_DESC_PER_BLOCK(sb);
        desc = block_group % EXT3_DESC_PER_BLOCK(sb);
        if (!EXT3_SB(sb)->s_group_desc[group_desc]) {
                ext3_error (sb, "ext3_get_group_desc",
                            "Group descriptor not loaded - "
                            "block_group = %d, group_desc = %lu, desc = %lu",
                            block_group, group_desc, desc);
                return NULL;
        }

        gdp = (struct ext3_group_desc *)
              EXT3_SB(sb)->s_group_desc[group_desc]->b_data;
        if (bh)
                *bh = EXT3_SB(sb)->s_group_desc[group_desc];
        return gdp + desc;
}
/*
 * Read the bitmap for a given block_group, reading into the specified
 * slot in the superblock's bitmap cache.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
static struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
        struct ext3_group_desc * desc;
        struct buffer_head * bh = NULL;

        desc = ext3_get_group_desc (sb, block_group, NULL);
        if (!desc)
                goto error_out;
        bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
        if (!bh)
                ext3_error (sb, "read_block_bitmap",
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %u",
                            block_group, le32_to_cpu(desc->bg_block_bitmap));
error_out:
        return bh;
}
/* Free given blocks, update quota and i_blocks field */
void ext3_free_blocks (handle_t *handle, struct inode * inode,
                        unsigned long block, unsigned long count)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gd_bh;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        unsigned long overflow;
        struct super_block * sb;
        struct ext3_group_desc * gdp;
        struct ext3_super_block * es;
        struct ext3_sb_info *sbi;
        int err = 0, ret;
        int dquot_freed_blocks = 0;

        sb = inode->i_sb;
        if (!sb) {
                printk ("ext3_free_blocks: nonexistent device");
                return;
        }
        sbi = EXT3_SB(sb);
        es = EXT3_SB(sb)->s_es;
        if (block < le32_to_cpu(es->s_first_data_block) ||
            block + count < block ||
            block + count > le32_to_cpu(es->s_blocks_count)) {
                ext3_error (sb, "ext3_free_blocks",
                            "Freeing blocks not in datazone - "
                            "block = %lu, count = %lu", block, count);
                goto error_return;
        }

        ext3_debug ("freeing block %lu\n", block);

do_more:
        overflow = 0;
        block_group = (block - le32_to_cpu(es->s_first_data_block)) /
                      EXT3_BLOCKS_PER_GROUP(sb);
        bit = (block - le32_to_cpu(es->s_first_data_block)) %
                      EXT3_BLOCKS_PER_GROUP(sb);
        /*
         * Check to see if we are freeing blocks across a group
         * boundary.
         */
        if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
                overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
                count -= overflow;
        }
        brelse(bitmap_bh);
        bitmap_bh = read_block_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;
        gdp = ext3_get_group_desc (sb, block_group, &gd_bh);
        if (!gdp)
                goto error_return;

        if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
            in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
            in_range (block, le32_to_cpu(gdp->bg_inode_table),
                      EXT3_SB(sb)->s_itb_per_group) ||
            in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
                      EXT3_SB(sb)->s_itb_per_group))
                ext3_error (sb, "ext3_free_blocks",
                            "Freeing blocks in system zones - "
                            "Block = %lu, count = %lu",
                            block, count);

        /*
         * We are about to start releasing blocks in the bitmap,
         * so we need undo access.
         */
        /* @@@ check errors */
        BUFFER_TRACE(bitmap_bh, "getting undo access");
        err = ext3_journal_get_undo_access(handle, bitmap_bh, NULL);
        if (err)
                goto error_return;

        /*
         * We are about to modify some metadata.  Call the journal APIs
         * to unshare ->b_data if a currently-committing transaction is
         * using it
         */
        BUFFER_TRACE(gd_bh, "get_write_access");
        err = ext3_journal_get_write_access(handle, gd_bh);
        if (err)
                goto error_return;

        jbd_lock_bh_state(bitmap_bh);

        for (i = 0; i < count; i++) {
                /*
                 * An HJ special.  This is expensive...
                 */
#ifdef CONFIG_JBD_DEBUG
                jbd_unlock_bh_state(bitmap_bh);
                {
                        struct buffer_head *debug_bh;
                        debug_bh = sb_find_get_block(sb, block + i);
                        if (debug_bh) {
                                BUFFER_TRACE(debug_bh, "Deleted!");
                                if (!bh2jh(bitmap_bh)->b_committed_data)
                                        BUFFER_TRACE(debug_bh,
                                                "No committed data in bitmap");
                                BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
                                __brelse(debug_bh);
                        }
                }
                jbd_lock_bh_state(bitmap_bh);
#endif
                /* @@@ This prevents newly-allocated data from being
                 * freed and then reallocated within the same
                 * transaction.
                 *
                 * Ideally we would want to allow that to happen, but to
                 * do so requires making journal_forget() capable of
                 * revoking the queued write of a data block, which
                 * implies blocking on the journal lock.  *forget()
                 * cannot block due to truncate races.
                 *
                 * Eventually we can fix this by making journal_forget()
                 * return a status indicating whether or not it was able
                 * to revoke the buffer.  On successful revoke, it is
                 * safe not to set the allocation bit in the committed
                 * bitmap, because we know that there is no outstanding
                 * activity on the buffer any more and so it is safe to
                 * reallocate it.
                 */
                BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
                J_ASSERT_BH(bitmap_bh,
                                bh2jh(bitmap_bh)->b_committed_data != NULL);
                ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
                                bh2jh(bitmap_bh)->b_committed_data);

                /*
                 * We clear the bit in the bitmap after setting the committed
                 * data bit, because this is the reverse order to that which
                 * the allocator uses.
                 */
                BUFFER_TRACE(bitmap_bh, "clear bit");
                if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                                bit + i, bitmap_bh->b_data)) {
                        jbd_unlock_bh_state(bitmap_bh);
                        ext3_error(sb, __FUNCTION__,
                                "bit already cleared for block %lu", block + i);
                        jbd_lock_bh_state(bitmap_bh);
                        BUFFER_TRACE(bitmap_bh, "bit already cleared");
                } else {
                        dquot_freed_blocks++;
                }
        }
        jbd_unlock_bh_state(bitmap_bh);

        spin_lock(sb_bgl_lock(sbi, block_group));
        gdp->bg_free_blocks_count =
                cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) +
                        count);
        spin_unlock(sb_bgl_lock(sbi, block_group));
        percpu_counter_mod(&sbi->s_freeblocks_counter, count);

        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
        err = ext3_journal_dirty_metadata(handle, bitmap_bh);

        /* And the group descriptor block */
        BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
        ret = ext3_journal_dirty_metadata(handle, gd_bh);
        if (!err)
                err = ret;

        if (overflow && !err) {
                block += count;
                count = overflow;
                goto do_more;
        }
error_return:
        brelse(bitmap_bh);
        ext3_std_error(sb, err);
        if (dquot_freed_blocks)
                DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
        return;
}
/*
 * For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static inline int ext3_test_allocatable(int nr, struct buffer_head *bh)
{
        int ret;
        struct journal_head *jh = bh2jh(bh);

        if (ext3_test_bit(nr, bh->b_data))
                return 0;

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data)
                ret = 1;
        else
                ret = !ext3_test_bit(nr, jh->b_committed_data);
        jbd_unlock_bh_state(bh);
        return ret;
}
/*
 * Find an allocatable block in a bitmap.  We honour both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static int
find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
{
        int here, next;
        char *p, *r;
        struct journal_head *jh = bh2jh(bh);

        if (start > 0) {
                /*
                 * The goal was occupied; search forward for a free
                 * block within the next XX blocks.
                 *
                 * end_goal is more or less random, but it has to be
                 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
                 * next 64-bit boundary is simple..
                 */
                int end_goal = (start + 63) & ~63;
                here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
                if (here < end_goal && ext3_test_allocatable(here, bh))
                        return here;
                ext3_debug("Bit not found near goal\n");
        }

        here = start;
        if (here < 0)
                here = 0;

        p = ((char *)bh->b_data) + (here >> 3);
        r = memscan(p, 0, (maxblocks - here + 7) >> 3);
        next = (r - ((char *)bh->b_data)) << 3;

        if (next < maxblocks && ext3_test_allocatable(next, bh))
                return next;

        /*
         * The bitmap search --- search forward alternately through the actual
         * bitmap and the last-committed copy until we find a bit free in
         * both
         */
        while (here < maxblocks) {
                next = ext3_find_next_zero_bit(bh->b_data, maxblocks, here);
                if (next >= maxblocks)
                        return -1;
                if (ext3_test_allocatable(next, bh))
                        return next;
                jbd_lock_bh_state(bh);
                if (jh->b_committed_data)
                        here = ext3_find_next_zero_bit(jh->b_committed_data,
                                                        maxblocks, next);
                jbd_unlock_bh_state(bh);
        }
        return -1;
}
/*
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
{
        struct journal_head *jh = bh2jh(bh);
        int ret;

        if (ext3_set_bit_atomic(lock, block, bh->b_data))
                return 0;
        jbd_lock_bh_state(bh);
        if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
                ext3_clear_bit_atomic(lock, block, bh->b_data);
                ret = 0;
        } else {
                ret = 1;
        }
        jbd_unlock_bh_state(bh);
        return ret;
}
/*
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext3_journal_release_buffer(), else we'll run out of credits.
 */
static int
ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
                struct buffer_head *bitmap_bh, int goal, int *errp)
{
        int i;
        int fatal;
        int credits = 0;

        *errp = 0;

        /*
         * Make sure we use undo access for the bitmap, because it is critical
         * that we do the frozen_data COW on bitmap buffers in all cases even
         * if the buffer is in BJ_Forget state in the committing transaction.
         */
        BUFFER_TRACE(bitmap_bh, "get undo access for new block");
        fatal = ext3_journal_get_undo_access(handle, bitmap_bh, &credits);
        if (fatal) {
                *errp = fatal;
                goto fail;
        }

repeat:
        if (goal < 0 || !ext3_test_allocatable(goal, bitmap_bh)) {
                goal = find_next_usable_block(goal, bitmap_bh,
                                        EXT3_BLOCKS_PER_GROUP(sb));
                if (goal < 0)
                        goto fail_access;

                for (i = 0; i < 7 && goal > 0 &&
                                ext3_test_allocatable(goal - 1, bitmap_bh);
                        i++, goal--)
                        ;
        }

        if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
                /*
                 * The block was allocated by another thread, or it was
                 * allocated and then freed by another thread
                 */
                goal++;
                if (goal >= EXT3_BLOCKS_PER_GROUP(sb))
                        goto fail_access;
                goto repeat;
        }

        BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for bitmap block");
        fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
        if (fatal) {
                *errp = fatal;
                goto fail;
        }
        return goal;

fail_access:
        BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
        ext3_journal_release_buffer(handle, bitmap_bh, credits);
fail:
        return -1;
}
static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
{
        int free_blocks, root_blocks;

        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
        if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
                sbi->s_resuid != current->fsuid &&
                (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
                return 0;
        }
        return 1;
}
/*
 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 */
int ext3_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

        return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
}
/*
 * ext3_new_block uses a goal block to assist allocation.  If the goal is
 * free, or there is a free block within 32 blocks of the goal, that block
 * is allocated.  Otherwise a forward search is made for a free block; within
 * each block group the search first looks for an entire free byte in the block
 * bitmap, and then for any free bit if that fails.
 * This function also updates quota and i_blocks field.
 */
int
ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal,
                u32 *prealloc_count, u32 *prealloc_block, int *errp)
{
        struct buffer_head *bitmap_bh = NULL;   /* bh */
        struct buffer_head *gdp_bh;             /* bh2 */
        int group_no;                           /* i */
        int ret_block;                          /* j */
        int bgi;                                /* blockgroup iteration index */
        int target_block;                       /* tmp */
        int fatal = 0, err;
        int performed_allocation = 0;
        int free_blocks;
        struct super_block *sb;
        struct ext3_group_desc *gdp;
        struct ext3_super_block *es;
        struct ext3_sb_info *sbi;
#ifdef EXT3FS_DEBUG
        static int goal_hits, goal_attempts;
#endif

        sb = inode->i_sb;
        if (!sb) {
                printk("ext3_new_block: nonexistent device");
                return 0;
        }

        /*
         * Check quota for allocation of this block.
         */
        if (DQUOT_ALLOC_BLOCK(inode, 1)) {
                *errp = -EDQUOT;
                return 0;
        }

        sbi = EXT3_SB(sb);
        es = EXT3_SB(sb)->s_es;
        ext3_debug("goal=%lu.\n", goal);

        if (!ext3_has_free_blocks(sbi)) {
                *errp = -ENOSPC;
                goto out;
        }

        /*
         * First, test whether the goal block is free.
         */
        if (goal < le32_to_cpu(es->s_first_data_block) ||
            goal >= le32_to_cpu(es->s_blocks_count))
                goal = le32_to_cpu(es->s_first_data_block);
        group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
                        EXT3_BLOCKS_PER_GROUP(sb);
        gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
        if (!gdp)
                goto io_error;

        free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
        if (free_blocks > 0) {
                ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) %
                                EXT3_BLOCKS_PER_GROUP(sb));
                bitmap_bh = read_block_bitmap(sb, group_no);
                if (!bitmap_bh)
                        goto io_error;
                ret_block = ext3_try_to_allocate(sb, handle, group_no,
                                        bitmap_bh, ret_block, &fatal);
                if (fatal)
                        goto out;
                if (ret_block >= 0)
                        goto allocated;
        }

        /*
         * Now search the rest of the groups.  We assume that
         * i and gdp correctly point to the last group visited.
         */
        for (bgi = 0; bgi < EXT3_SB(sb)->s_groups_count; bgi++) {
                group_no++;
                if (group_no >= EXT3_SB(sb)->s_groups_count)
                        group_no = 0;
                gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
                if (!gdp) {
                        *errp = -EIO;
                        goto out;
                }
                free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
                if (free_blocks <= 0)
                        continue;

                brelse(bitmap_bh);
                bitmap_bh = read_block_bitmap(sb, group_no);
                if (!bitmap_bh)
                        goto io_error;
                ret_block = ext3_try_to_allocate(sb, handle, group_no,
                                        bitmap_bh, -1, &fatal);
                if (fatal)
                        goto out;
                if (ret_block >= 0)
                        goto allocated;
        }

        /* No space left on the device */
        *errp = -ENOSPC;
        goto out;

allocated:

        ext3_debug("using block group %d(%d)\n",
                        group_no, gdp->bg_free_blocks_count);

        BUFFER_TRACE(gdp_bh, "get_write_access");
        fatal = ext3_journal_get_write_access(handle, gdp_bh);
        if (fatal)
                goto out;

        target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
                                + le32_to_cpu(es->s_first_data_block);

        if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
            target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
            in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
                      EXT3_SB(sb)->s_itb_per_group))
                ext3_error(sb, "ext3_new_block",
                            "Allocating block in system zone - "
                            "block = %u", target_block);

        performed_allocation = 1;

#ifdef CONFIG_JBD_DEBUG
        {
                struct buffer_head *debug_bh;

                /* Record bitmap buffer state in the newly allocated block */
                debug_bh = sb_find_get_block(sb, target_block);
                if (debug_bh) {
                        BUFFER_TRACE(debug_bh, "state when allocated");
                        BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
                        brelse(debug_bh);
                }
        }
        jbd_lock_bh_state(bitmap_bh);
        spin_lock(sb_bgl_lock(sbi, group_no));
        if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
                if (ext3_test_bit(ret_block,
                                bh2jh(bitmap_bh)->b_committed_data)) {
                        printk("%s: block was unexpectedly set in "
                                "b_committed_data\n", __FUNCTION__);
                }
        }
        ext3_debug("found bit %d\n", ret_block);
        spin_unlock(sb_bgl_lock(sbi, group_no));
        jbd_unlock_bh_state(bitmap_bh);
#endif

        /* ret_block was blockgroup-relative.  Now it becomes fs-relative */
        ret_block = target_block;

        if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
                ext3_error(sb, "ext3_new_block",
                            "block(%d) >= blocks count(%d) - "
                            "block_group = %d, es == %p ", ret_block,
                        le32_to_cpu(es->s_blocks_count), group_no, es);
                goto out;
        }

        /*
         * It is up to the caller to add the new buffer to a journal
         * list of some description.  We don't know in advance whether
         * the caller wants to use it as metadata or data.
         */
        ext3_debug("allocating block %d. Goal hits %d of %d.\n",
                        ret_block, goal_hits, goal_attempts);

        spin_lock(sb_bgl_lock(sbi, group_no));
        gdp->bg_free_blocks_count =
                        cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
        spin_unlock(sb_bgl_lock(sbi, group_no));
        percpu_counter_mod(&sbi->s_freeblocks_counter, -1);

        BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
        err = ext3_journal_dirty_metadata(handle, gdp_bh);
        if (!fatal)
                fatal = err;

        sb->s_dirt = 1;
        if (fatal)
                goto out;

        *errp = 0;
        brelse(bitmap_bh);
        return ret_block;

io_error:
        *errp = -EIO;
out:
        if (fatal) {
                *errp = fatal;
                ext3_std_error(sb, fatal);
        }
        /*
         * Undo the block allocation
         */
        if (!performed_allocation)
                DQUOT_FREE_BLOCK(inode, 1);
        brelse(bitmap_bh);
        return 0;
}
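/*
 * Editorial sketch (not part of this file): a minimal caller of
 * ext3_new_block(), assuming a running transaction `handle` and a goal
 * block already chosen by the caller.  Callers pass 0 for the two
 * preallocation arguments when preallocation is not wanted; a return of
 * 0 means failure and *errp holds the errno (e.g. -ENOSPC, -EDQUOT).
 */
#if 0
        int err;
        unsigned long new_block;

        new_block = ext3_new_block(handle, inode, goal, 0, 0, &err);
        if (!new_block)
                return err;
#endif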
unsigned long ext3_count_free_blocks(struct super_block *sb)
{
        unsigned long desc_count;
        struct ext3_group_desc *gdp;
        int i;
#ifdef EXT3FS_DEBUG
        struct ext3_super_block *es;
        unsigned long bitmap_count, x;
        struct buffer_head *bitmap_bh = NULL;

        lock_super(sb);
        es = EXT3_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;
        for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
                gdp = ext3_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
                brelse(bitmap_bh);
                bitmap_bh = read_block_bitmap(sb, i);
                if (bitmap_bh == NULL)
                        continue;

                x = ext3_count_free(bitmap_bh, sb->s_blocksize);
                printk("group %d: stored = %d, counted = %lu\n",
                        i, le16_to_cpu(gdp->bg_free_blocks_count), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n",
               le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count);
        unlock_super(sb);
        return bitmap_count;
#else
        desc_count = 0;
        for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
                gdp = ext3_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
        }

        return desc_count;
#endif
}
static inline int block_in_use(unsigned long block,
                                struct super_block * sb,
                                unsigned char * map)
{
        return ext3_test_bit ((block -
                le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
                         EXT3_BLOCKS_PER_GROUP(sb), map);
}
static inline int test_root(int a, int b)
{
        if (a == 0)
                return 1;
        while (1) {
                if (a == 1)
                        return 1;
                if (a % b)
                        return 0;
                a = a / b;
        }
}

int ext3_group_sparse(int group)
{
        return (test_root(group, 3) || test_root(group, 5) ||
                test_root(group, 7));
}
/**
 *      ext3_bg_has_super - number of blocks used by the superblock in group
 *      @sb: superblock for filesystem
 *      @group: group number to check
 *
 *      Return the number of blocks used by the superblock (primary or backup)
 *      in this group.  Currently this will be only 0 or 1.
 */
int ext3_bg_has_super(struct super_block *sb, int group)
{
        if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
            !ext3_group_sparse(group))
                return 0;
        return 1;
}
/**
 *      ext3_bg_num_gdb - number of blocks used by the group table in group
 *      @sb: superblock for filesystem
 *      @group: group number to check
 *
 *      Return the number of blocks used by the group descriptor table
 *      (primary or backup) in this group.  In the future there may be a
 *      different number of descriptor blocks in each group.
 */
unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
{
        if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
            !ext3_group_sparse(group))
                return 0;
        return EXT3_SB(sb)->s_gdb_count;
}
#ifdef CONFIG_EXT3_CHECK
/* Called at mount-time, super-block is locked */
void ext3_check_blocks_bitmap (struct super_block * sb)
{
        struct ext3_super_block *es;
        unsigned long desc_count, bitmap_count, x, j;
        unsigned long desc_blocks;
        struct buffer_head *bitmap_bh = NULL;
        struct ext3_group_desc *gdp;
        int i;

        es = EXT3_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;
        for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
                gdp = ext3_get_group_desc (sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
                brelse(bitmap_bh);
                bitmap_bh = read_block_bitmap(sb, i);
                if (bitmap_bh == NULL)
                        continue;

                if (ext3_bg_has_super(sb, i) &&
                                !ext3_test_bit(0, bitmap_bh->b_data))
                        ext3_error(sb, __FUNCTION__,
                                   "Superblock in group %d is marked free", i);

                desc_blocks = ext3_bg_num_gdb(sb, i);
                for (j = 0; j < desc_blocks; j++)
                        if (!ext3_test_bit(j + 1, bitmap_bh->b_data))
                                ext3_error(sb, __FUNCTION__,
                                           "Descriptor block #%ld in group "
                                           "%d is marked free", j, i);

                if (!block_in_use (le32_to_cpu(gdp->bg_block_bitmap),
                                        sb, bitmap_bh->b_data))
                        ext3_error (sb, "ext3_check_blocks_bitmap",
                                    "Block bitmap for group %d is marked free",
                                    i);

                if (!block_in_use (le32_to_cpu(gdp->bg_inode_bitmap),
                                        sb, bitmap_bh->b_data))
                        ext3_error (sb, "ext3_check_blocks_bitmap",
                                    "Inode bitmap for group %d is marked free",
                                    i);

                for (j = 0; j < EXT3_SB(sb)->s_itb_per_group; j++)
                        if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j,
                                                sb, bitmap_bh->b_data))
                                ext3_error (sb, "ext3_check_blocks_bitmap",
                                            "Block #%d of the inode table in "
                                            "group %d is marked free", j, i);

                x = ext3_count_free(bitmap_bh, sb->s_blocksize);
                if (le16_to_cpu(gdp->bg_free_blocks_count) != x)
                        ext3_error (sb, "ext3_check_blocks_bitmap",
                                    "Wrong free blocks count for group %d, "
                                    "stored = %d, counted = %lu", i,
                                    le16_to_cpu(gdp->bg_free_blocks_count), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count)
                ext3_error (sb, "ext3_check_blocks_bitmap",
                        "Wrong free blocks count in super block, "
                        "stored = %lu, counted = %lu",
                        (unsigned long)le32_to_cpu(es->s_free_blocks_count),
                        bitmap_count);
}
#endif