/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */
#include "udfdecl.h"

#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"
#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_next_one_bit(addr, size, offset) \
	ext2_find_next_bit(addr, size, offset)
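
/*
 * The ext2_* helpers are the kernel's little-endian bitmap operations;
 * they match the on-disk bit order of UDF space bitmaps. Note the polarity:
 * a set bit marks a free block, so freeing sets bits and allocating clears
 * them in the routines below.
 */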
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) > nr_groups (%d)\n", block_group,
			  nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}
static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}
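
/*
 * cnt is declared u32 but is effectively signed: callers below pass
 * -alloc_count and -1 to charge allocations against the LVID free-space
 * table, relying on two's-complement wrap-around in le32_add_cpu().
 */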
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
			  count, partmap->s_partition_len);
		goto error_return;
	}

	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %ld already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((char *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}
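
/*
 * Worked example of the group/bit split above (illustrative numbers): with
 * a 2048-byte block size (s_blocksize_bits == 11), one bitmap block covers
 * 2048 << 3 == 16384 blocks, so an adjusted block number of 40000 lands in
 * group 40000 >> (11 + 3) == 2 at bit 40000 % 16384 == 7232.
 */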
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
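
/*
 * Preallocation is best-effort: the function returns how many blocks from
 * first_block onward were actually claimed, which may be fewer than
 * block_count if the run reaches an already-allocated bit (udf_clear_bit()
 * returning 0) or the end of the partition.
 */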
static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
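
/*
 * Search order used above: the goal bit itself, then a free bit inside the
 * same 64-bit-aligned window (end_goal), then a fully-free byte located
 * with memscan() (0xFF == eight free bits), then any free bit in the group,
 * and finally up to two passes over the remaining groups. search_back then
 * walks at most 7 bits backwards over free neighbours so the allocation
 * starts nearer the beginning of a free run.
 */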
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */
		int adsize;
		struct short_ad *sad = NULL;
		struct long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			unsigned char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			epos.bh = udf_tread(sb,
					udf_get_lb_pblock(sb, &epos.block, 0));
			if (!epos.bh) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = iinfo->i_ext.i_data + epos.offset
								- adsize;
				dptr = epos.bh->b_data +
					sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) +
						adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				if (oepos.bh) {
					sptr = oepos.bh->b_data + epos.offset;
					aed = (struct allocExtDesc *)
						oepos.bh->b_data;
					le32_add_cpu(&aed->lengthAllocDescs,
							adsize);
				} else {
					sptr = iinfo->i_ext.i_data +
							epos.offset;
					iinfo->i_lenAlloc += adsize;
					mark_inode_dirty(table);
				}
				epos.offset = sizeof(struct allocExtDesc);
			}
			if (sbi->s_udfrev >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    3, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    2, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));

			switch (iinfo->i_alloc_type) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (struct short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (struct long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation =
					cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen) {
			udf_write_aext(table, &epos, &eloc, elen, 1);

			if (!epos.bh) {
				iinfo->i_lenAlloc += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				le32_add_cpu(&aed->lengthAllocDescs, adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
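
/*
 * The 0x3FFFFFFF tests above follow from the UDF extent encoding: the low
 * 30 bits of extLength carry the byte length and the top 2 bits the extent
 * type (etype << 30), so a merged extent is capped at the largest
 * block-aligned 30-bit value, 0x40000000 - sb->s_blocksize, and any
 * remainder is carried into the next loop iteration via count/start/end.
 */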
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
					(etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
					(elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
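
/*
 * Note on goal_epos.offset: udf_next_aext() advances epos.offset past the
 * descriptor it just returned, so recording epos.offset - adsize points the
 * saved position back at the matching descriptor for the final
 * udf_write_aext()/udf_delete_aext() call.
 */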
void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
				      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
				      bloc, offset, count);
	}
}
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_uspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_uspace.s_table,
						 partition, first_block,
						 block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_fspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_fspace.s_table,
						 partition, first_block,
						 block_count);
	else
		return 0;
}
inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_uspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_uspace.s_table,
					   partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_fspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_fspace.s_table,
					   partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}
}
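
/*
 * Usage sketch (illustrative only; real call sites live elsewhere in the
 * UDF code): allocating one block near 'goal' through the dispatcher above
 * and releasing it again might look like:
 *
 *	int err = 0;
 *	uint32_t blk = udf_new_block(sb, inode, partition, goal, &err);
 *	struct kernel_lb_addr bloc;
 *
 *	if (!blk)
 *		return err;
 *	bloc.logicalBlockNum = blk;
 *	bloc.partitionReferenceNum = partition;
 *	...
 *	udf_free_blocks(sb, inode, &bloc, 0, 1);
 */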