/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit	__test_and_clear_bit_le
#define udf_set_bit	__test_and_set_bit_le
#define udf_test_bit	test_bit_le
#define udf_find_next_one_bit	find_next_bit_le
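
/*
 * Read one block group's worth of the space bitmap from disk and cache
 * the buffer_head in the in-core udf_bitmap. Returns 0 on success or
 * -EIO if the bitmap block could not be read.
 */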
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
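
/*
 * Make sure the bitmap block for @block_group is cached, reading it in on
 * first use. Returns the cache slot (here simply the group number) or a
 * negative error code from read_block_bitmap().
 */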
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) > nr_groups (%d)\n", block_group,
			  nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}
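
/*
 * Wrapper around __load_block_bitmap() that also verifies the cached
 * buffer_head is actually present, turning a missing buffer into -EIO.
 */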
static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
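
/*
 * Adjust the free space counter of the Logical Volume Integrity Descriptor
 * by @cnt blocks; callers pass a negative count when allocating. If no LVID
 * buffer is cached the update is skipped.
 */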
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}
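
/*
 * Free @count blocks of a bitmap partition by setting the corresponding
 * bits. The do/while loop carries any overflow into the next block group
 * when the freed run crosses a group boundary.
 */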
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
			  count, partmap->s_partition_len);
		goto error_return;
	}

	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group
		 * boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %ld already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((char *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}
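
/*
 * Reserve up to @block_count blocks starting at @first_block by clearing
 * bits in the space bitmap. The scan stops at the first block that is
 * already allocated and returns how many blocks were actually grabbed.
 */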
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
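
/*
 * Allocate a single block near @goal from a bitmap partition. The search
 * tries the goal bit itself, then the remainder of its 64-bit word, then
 * scans the remaining groups; search_back then backs up over as many as
 * seven free bits so the allocation starts nearer the head of a free run.
 */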
static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit(bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
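
/*
 * Return @count blocks to an unallocated-space table. A freed range is
 * merged into an existing extent that ends at @start or begins at @end + 1
 * where possible; otherwise a new extent is appended, stealing one of the
 * freed blocks for a fresh allocation extent descriptor if the current
 * descriptor block is full.
 */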
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;
		struct short_ad *sad = NULL;
		struct long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			unsigned char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being free'd */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			epos.bh = udf_tread(sb,
					udf_get_lb_pblock(sb, &epos.block, 0));
			if (!epos.bh) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = iinfo->i_ext.i_data + epos.offset
								- adsize;
				dptr = epos.bh->b_data +
					sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) +
						adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				if (oepos.bh) {
					sptr = oepos.bh->b_data + epos.offset;
					aed = (struct allocExtDesc *)
						oepos.bh->b_data;
					le32_add_cpu(&aed->lengthAllocDescs,
							adsize);
				} else {
					sptr = iinfo->i_ext.i_data +
								epos.offset;
					iinfo->i_lenAlloc += adsize;
					mark_inode_dirty(table);
				}
				epos.offset = sizeof(struct allocExtDesc);
			}
			if (sbi->s_udfrev >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    3, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    2, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));

			switch (iinfo->i_alloc_type) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (struct short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (struct long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation =
					cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen) {
			udf_write_aext(table, &epos, &eloc, elen, 1);

			if (!epos.bh) {
				iinfo->i_lenAlloc += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				le32_add_cpu(&aed->lengthAllocDescs, adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
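
/*
 * Preallocate up to @block_count blocks from a table partition. This only
 * succeeds when some extent starts exactly at @first_block; that extent is
 * then shrunk from the front, or deleted outright when fully consumed.
 */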
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
					(etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
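
/*
 * Allocate one block from a table partition, picking the extent whose
 * distance ("spread") from @goal is smallest. Blocks are only taken from
 * the front of the chosen extent, so extents shrink or vanish but never
 * need to be split.
 */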
static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
				   (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
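
/*
 * Dispatch freeing to the bitmap or table implementation, depending on
 * whether the partition tracks its unallocated or freed space as a bitmap
 * or as a table.
 */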
void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
				      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
				      bloc, offset, count);
	}
}
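
/*
 * Dispatch preallocation to the matching bitmap or table implementation.
 * Returns the number of blocks reserved, or 0 when the partition has no
 * recognised space set.
 */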
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_uspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_uspace.s_table,
						 partition, first_block,
						 block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_fspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_fspace.s_table,
						 partition, first_block,
						 block_count);
	else
		return 0;
}
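
/*
 * Dispatch single-block allocation to the matching bitmap or table
 * implementation; *err reports failure, including -EIO when the partition
 * has no recognised space set.
 */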
inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_uspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_uspace.s_table,
					   partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_fspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_fspace.s_table,
					   partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}
}