/*
 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *	ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1999-2001 Ben Fennema
 * (C) 1999 Stelias Computing Inc
 *
 *	02/24/99 blf  Created.
 */
#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"
#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) \
	find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
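
/*
 * Scan a little-endian bitmap of 'size' bits for the first set bit at or
 * after 'offset'.  Each word is converted with leBPL_to_cpup() before it
 * is tested, so the search is independent of host endianness.
 */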
static inline int find_next_one_bit(void *addr, int size, int offset)
	uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);

	offset &= (BITS_PER_LONG - 1);
	tmp = leBPL_to_cpup(p++);
	tmp &= ~0UL << offset;
	if (size < BITS_PER_LONG)
	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;

	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = leBPL_to_cpup(p++);
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;

	tmp = leBPL_to_cpup(p);
	tmp &= ~0UL >> (BITS_PER_LONG - size);
	return result + ffz(~tmp);
#define find_first_one_bit(addr, size) \
	find_next_one_bit((addr), (size), 0)
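
/*
 * Read one block of the on-disk space bitmap and cache the buffer_head in
 * bitmap->s_block_bitmap[bitmap_nr] for later use.
 */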
static int read_block_bitmap(struct super_block *sb,
		struct udf_bitmap *bitmap, unsigned int block,
		unsigned long bitmap_nr)
	struct buffer_head *bh = NULL;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	bitmap->s_block_bitmap[bitmap_nr] = bh;
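
/*
 * Ensure the bitmap block for 'block_group' is loaded, reading it via
 * read_block_bitmap() when it is not already cached.
 */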
static int __load_block_bitmap(struct super_block *sb,
		struct udf_bitmap *bitmap,
		unsigned int block_group)
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) > nr_groups (%d)\n", block_group,
				nr_groups);
	if (bitmap->s_block_bitmap[block_group]) {
		retval = read_block_bitmap(sb, bitmap, block_group,
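
/*
 * Wrapper around __load_block_bitmap() that also checks that the cached
 * buffer_head for the returned slot actually exists.
 */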
static inline int load_block_bitmap(struct super_block *sb,
		struct udf_bitmap *bitmap,
		unsigned int block_group)
	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (!bitmap->s_block_bitmap[slot])
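
/*
 * Adjust the free-space counter of 'partition' in the Logical Volume
 * Integrity Descriptor by 'cnt' blocks (callers pass negative values to
 * account for allocations).  Nothing is done when no LVID buffer is
 * cached in the superblock info.
 */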
static bool udf_add_free_space(struct udf_sb_info *sbi,
		u16 partition, u32 cnt)
	struct logicalVolIntegrityDesc *lvid;

	if (sbi->s_lvid_bh == NULL)

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
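
/*
 * Free blocks in a bitmap-managed partition: set the matching bits,
 * return quota and credit the free-space count, all under
 * sbi->s_alloc_mutex.
 */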
static void udf_bitmap_free_blocks(struct super_block *sb,
		struct udf_bitmap *bitmap,
		kernel_lb_addr bloc, uint32_t offset,
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block_group;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) >
	    sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  sbi->s_partmaps[bloc.partitionReferenceNum].
				s_partition_len);
	block = bloc.logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);

	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3)) {
		overflow = bit + count - (sb->s_blocksize << 3);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i = 0; i < count; i++) {
		if (udf_set_bit(bit + i, bh->b_data)) {
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n",
				  ((char *)bh->b_data)[(bit + i) >> 3]);
		vfs_dq_free_block(inode, 1);
		udf_add_free_space(sbi, sbi->s_partition, 1);

	mark_buffer_dirty(bh);

	mark_buffer_dirty(sbi->s_lvid_bh);
	mutex_unlock(&sbi->s_alloc_mutex);
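
/*
 * Try to reserve up to 'block_count' blocks starting at 'first_block' by
 * clearing consecutive bits in the space bitmap, charging quota for each
 * block obtained.
 */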
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
		struct udf_bitmap *bitmap,
		uint16_t partition, uint32_t first_block,
		uint32_t block_count)
	struct udf_sb_info *sbi = UDF_SB(sb);
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block < 0 || first_block >= part_len)

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	nr_groups = udf_compute_nr_groups(sb, partition);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);

	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0) {
		if (!udf_test_bit(bit, bh->b_data))
		else if (vfs_dq_prealloc_block(inode, 1))
		else if (!udf_clear_bit(bit, bh->b_data)) {
			udf_debug("bit already cleared for block %d\n", bit);
			vfs_dq_free_block(inode, 1);

	mark_buffer_dirty(bh);
	} while (block_count > 0);

	if (udf_add_free_space(sbi, partition, -alloc_count))
		mark_buffer_dirty(sbi->s_lvid_bh);

	mutex_unlock(&sbi->s_alloc_mutex);
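
/*
 * Allocate a single block from a bitmap-managed partition, preferring a
 * free bit near 'goal' and falling back to a scan of the remaining block
 * groups.  The chosen bit is cleared, quota is charged and the free-space
 * count is decremented.
 */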
static int udf_bitmap_new_block(struct super_block *sb,
		struct udf_bitmap *bitmap, uint16_t partition,
		uint32_t goal, int *err)
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;

	mutex_lock(&sbi->s_alloc_mutex);

	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);

	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
			sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
				sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {

		newbit = udf_find_next_one_bit(bh->b_data,
				sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {

	for (i = 0; i < (nr_groups * 2); i++) {
		if (block_group >= nr_groups)

		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);

		bh = bitmap->s_block_bitmap[bitmap_nr];

		ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				sb->s_blocksize - group_start);
		if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
			bit = (ptr - ((char *)bh->b_data)) << 3;

			bit = udf_find_next_one_bit((char *)bh->b_data,
					sb->s_blocksize << 3,
			if (bit < sb->s_blocksize << 3)

	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);

	if (bit < sb->s_blocksize << 3)

	bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);

	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && vfs_dq_alloc_block(inode, 1)) {
		mutex_unlock(&sbi->s_alloc_mutex);

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);

	mark_buffer_dirty(bh);

	if (udf_add_free_space(sbi, partition, -1))
		mark_buffer_dirty(sbi->s_lvid_bh);

	mutex_unlock(&sbi->s_alloc_mutex);

	mutex_unlock(&sbi->s_alloc_mutex);
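
/*
 * Return freed blocks to a table-managed partition: merge the range into
 * an adjacent extent of the unallocated-space table when possible,
 * otherwise append a new extent, stealing a block from the freed range
 * for a fresh allocation extent descriptor if the current one is full.
 */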
static void udf_table_free_blocks(struct super_block *sb,
		kernel_lb_addr bloc, uint32_t offset,
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct extent_position oepos, epos;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) >
	    sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  sbi->s_partmaps[bloc.partitionReferenceNum].
				s_partition_len);
	iinfo = UDF_I(table);
	/* We do this up front - There are some error conditions that
	   could occur, but.. oh well */
	vfs_dq_free_block(inode, count);
	if (udf_add_free_space(sbi, sbi->s_partition, count))
		mark_buffer_dirty(sbi->s_lvid_bh);

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);

				elen = (etype << 30) |
					(count << sb->s_blocksize_bits));

			udf_write_aext(table, &oepos, eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);

				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(count << sb->s_blocksize_bits));

			udf_write_aext(table, &oepos, eloc, elen, 1);

		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			oepos.offset = epos.offset;
	/*
	 * NOTE: we CANNOT use udf_add_aext here, as it can try to
	 * allocate a new block, and since we hold the super block
	 * lock already very bad things would happen :)
	 *
	 * We copy the behavior of udf_add_aext, but instead of
	 * trying to allocate a new block close to the existing one,
	 * we just steal a block from the extent we are trying to add.
	 *
	 * It would be nice if the blocks were close together, but it
	 * isn't required.
	 */
	short_ad *sad = NULL;
	struct allocExtDesc *aed;

	eloc.logicalBlockNum = start;
	elen = EXT_RECORDED_ALLOCATED |
		(count << sb->s_blocksize_bits);

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);

	if (epos.offset + (2 * adsize) > sb->s_blocksize) {
		/* Steal a block from the extent being free'd */
		epos.block.logicalBlockNum = eloc.logicalBlockNum;
		eloc.logicalBlockNum++;
		elen -= sb->s_blocksize;

		epos.bh = udf_tread(sb,
				udf_get_lb_pblock(sb, epos.block, 0));

		aed = (struct allocExtDesc *)(epos.bh->b_data);
		aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
		if (epos.offset + adsize > sb->s_blocksize) {
			loffset = epos.offset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = iinfo->i_ext.i_data + epos.offset
			dptr = epos.bh->b_data +
				sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			epos.offset = sizeof(struct allocExtDesc) +

			loffset = epos.offset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);

			sptr = oepos.bh->b_data + epos.offset;
			aed = (struct allocExtDesc *)
			le32_add_cpu(&aed->lengthAllocDescs,

			sptr = iinfo->i_ext.i_data +
			iinfo->i_lenAlloc += adsize;
			mark_inode_dirty(table);

		epos.offset = sizeof(struct allocExtDesc);

		if (sbi->s_udfrev >= 0x0200)
			udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					3, 1, epos.block.logicalBlockNum,
			udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					2, 1, epos.block.logicalBlockNum,

		switch (iinfo->i_alloc_type) {
		case ICBTAG_FLAG_AD_SHORT:
			sad = (short_ad *)sptr;
			sad->extLength = cpu_to_le32(
				EXT_NEXT_EXTENT_ALLOCDECS |
			cpu_to_le32(epos.block.logicalBlockNum);
		case ICBTAG_FLAG_AD_LONG:
			lad = (long_ad *)sptr;
			lad->extLength = cpu_to_le32(
				EXT_NEXT_EXTENT_ALLOCDECS |
			cpu_to_lelb(epos.block);

		udf_update_tag(oepos.bh->b_data, loffset);
		mark_buffer_dirty(oepos.bh);

		mark_inode_dirty(table);

	/* It's possible that stealing the block emptied the extent */
		udf_write_aext(table, &epos, eloc, elen, 1);

		iinfo->i_lenAlloc += adsize;
		mark_inode_dirty(table);

		aed = (struct allocExtDesc *)epos.bh->b_data;
		le32_add_cpu(&aed->lengthAllocDescs, adsize);
		udf_update_tag(epos.bh->b_data, epos.offset);
		mark_buffer_dirty(epos.bh);

	mutex_unlock(&sbi->s_alloc_mutex);
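
/*
 * Reserve up to 'block_count' blocks starting at 'first_block' from a
 * table-managed partition by shrinking (or deleting) the extent that
 * begins at that block.
 */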
static int udf_table_prealloc_blocks(struct super_block *sb,
		struct inode *table, uint16_t partition,
		uint32_t first_block, uint32_t block_count)
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t elen, adsize;
	struct extent_position epos;
	struct udf_inode_info *iinfo;

	if (first_block < 0 ||
	    first_block >= sbi->s_partmaps[partition].s_partition_len)

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && vfs_dq_prealloc_block(inode,
			alloc_count > block_count ? block_count : alloc_count))
		else if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc,
					(etype << 30) | elen, 1);
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);

	if (alloc_count && udf_add_free_space(sbi, partition, -alloc_count)) {
		mark_buffer_dirty(sbi->s_lvid_bh);

	mutex_unlock(&sbi->s_alloc_mutex);
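
/*
 * Allocate one block from a table-managed partition, picking the extent
 * whose start lies closest to 'goal' and always taking the first block of
 * that extent so extents never have to be split.
 */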
static int udf_table_new_block(struct super_block *sb,
		struct inode *table, uint16_t partition,
		uint32_t goal, int *err)
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	struct udf_inode_info *iinfo = UDF_I(table);

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done. */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
					(elen >> sb->s_blocksize_bits))
			nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
			nspread = eloc.logicalBlockNum - goal;

		if (nspread < spread) {
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_elen = (etype << 30) | elen;

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && vfs_dq_alloc_block(inode, 1)) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);

	udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);

	udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (udf_add_free_space(sbi, partition, -1))
		mark_buffer_dirty(sbi->s_lvid_bh);

	mutex_unlock(&sbi->s_alloc_mutex);
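
/*
 * Dispatch block freeing to the bitmap or table variant according to the
 * partition's space-management flags.
 */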
inline void udf_free_blocks(struct super_block *sb,
		kernel_lb_addr bloc, uint32_t offset,
	uint16_t partition = bloc.partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					map->s_uspace.s_bitmap,
					bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_free_blocks(sb, inode,
					map->s_uspace.s_table,
					bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					map->s_fspace.s_bitmap,
					bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_free_blocks(sb, inode,
					map->s_fspace.s_table,
					bloc, offset, count);
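
/*
 * Dispatch preallocation to the bitmap or table variant according to the
 * partition's space-management flags.
 */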
inline int udf_prealloc_blocks(struct super_block *sb,
		uint16_t partition, uint32_t first_block,
		uint32_t block_count)
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
					map->s_uspace.s_bitmap,
					partition, first_block,
					block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
					map->s_uspace.s_table,
					partition, first_block,
					block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
					map->s_fspace.s_bitmap,
					partition, first_block,
					block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
					map->s_fspace.s_table,
					partition, first_block,
					block_count);
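
/*
 * Dispatch single-block allocation to the bitmap or table variant
 * according to the partition's space-management flags.
 */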
inline int udf_new_block(struct super_block *sb,
		uint16_t partition, uint32_t goal, int *err)
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					map->s_uspace.s_bitmap,
					partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_new_block(sb, inode,
					map->s_uspace.s_table,
					partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					map->s_fspace.s_bitmap,
					partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_new_block(sb, inode,
					map->s_fspace.s_table,
					partition, goal, err);