/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 */
#include "udfdecl.h"

#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

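/*
 * UDF space bitmaps use the same little-endian bit numbering as ext2,
 * so the generic ext2 bitmap helpers can be wrapped directly.
 */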
#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_next_one_bit(addr, size, offset) \
		ext2_find_next_bit(addr, size, offset)
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
			  nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}
static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);
	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
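	/*
	 * Reject a range that wraps around (logicalBlockNum + count
	 * overflows) or that runs past the end of the partition.
	 */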
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
			  count, partmap->s_partition_len);
		goto error_return;
	}
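	/*
	 * Bit positions in the bitmap are biased by the size (in bits) of
	 * the spaceBitmapDesc header that precedes the map data.
	 */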
	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);
	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3)) {
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i = 0; i < count; i++) {
		if (udf_set_bit(bit + i, bh->b_data)) {
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n",
				  ((char *)bh->b_data)[(bit + i) >> 3]);
		} else {
			udf_add_free_space(sb, sbi->s_partition, 1);
		}
	}
	mark_buffer_dirty(bh);
	if (overflow) {
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
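		/*
		 * Block group 0 starts with the spaceBitmapDesc header, so
		 * its bitmap data begins group_start bytes into the buffer;
		 * every later group is pure bitmap data.
		 */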
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);
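	/*
	 * Try the goal block first, then the rest of its 64-bit word, then
	 * the rest of the goal's block group, before falling back to a scan
	 * of all other groups.
	 */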
repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}
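	/*
	 * Nothing free near the goal: sweep the remaining groups, first
	 * with a cheap bytewise scan, then bitwise; hence nr_groups * 2
	 * iterations.
	 */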
	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}
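	/*
	 * A set bit means the block is free.  Back up over as many as seven
	 * preceding free bits so the allocation starts closer to the
	 * beginning of a free run.
	 */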
search_back:
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}
got_block:
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;
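	/*
	 * Walk the free-space table looking for an extent the freed range
	 * can merge with: one that ends at 'start', or one that begins at
	 * 'end' + 1.  Extent lengths are capped at 0x3FFFFFFF bytes, so a
	 * merge may only absorb part of the range.
	 */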
	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}
		if (epos.bh != oepos.bh) {
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;
		struct short_ad *sad = NULL;
		struct long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}
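		/*
		 * If the current descriptor block cannot hold one more
		 * extent plus the pointer to a continuation block, chain in
		 * a new allocation extent descriptor, taking its block from
		 * the front of the range being freed.
		 */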
		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			unsigned char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			epos.bh = udf_tread(sb,
					udf_get_lb_pblock(sb, &epos.block, 0));
			if (!epos.bh) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = iinfo->i_ext.i_data + epos.offset
								- adsize;
				dptr = epos.bh->b_data +
					sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) +
						adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				if (oepos.bh) {
					sptr = oepos.bh->b_data + epos.offset;
					aed = (struct allocExtDesc *)
						oepos.bh->b_data;
					le32_add_cpu(&aed->lengthAllocDescs,
							adsize);
				} else {
					sptr = iinfo->i_ext.i_data +
								epos.offset;
					iinfo->i_lenAlloc += adsize;
					mark_inode_dirty(table);
				}
				epos.offset = sizeof(struct allocExtDesc);
			}
			if (sbi->s_udfrev >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    3, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    2, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));
			switch (iinfo->i_alloc_type) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (struct short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (struct long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation =
					cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}
		/* It's possible that stealing the block emptied the extent */
		if (elen) {
			udf_write_aext(table, &epos, &eloc, elen, 1);

			if (!epos.bh) {
				iinfo->i_lenAlloc += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				le32_add_cpu(&aed->lengthAllocDescs, adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;
	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;
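	/*
	 * Scan the table for the extent that starts exactly at first_block;
	 * the 0xFFFFFFFF sentinel keeps the loop from matching before the
	 * first extent has been read.
	 */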
	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}
	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
					(etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;
	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;
	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
					(elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);
	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}
	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */

	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;
	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
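/*
 * The exported entry points below dispatch on the partition's
 * space-management flags: unallocated vs. freed space, tracked either in
 * a bitmap or in an unallocated-space-entry table.
 */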
void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
				      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
				      bloc, offset, count);
	}
}
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_uspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_uspace.s_table,
						 partition, first_block,
						 block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_fspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_fspace.s_table,
						 partition, first_block,
						 block_count);
	else
		return 0;
}
inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_uspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_uspace.s_table,
					   partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_fspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_fspace.s_table,
					   partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}
}