/*
 * balloc.c
 *
 * PURPOSE
 *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 *      (C) 1999-2001 Ben Fennema
 *      (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *      02/24/99 blf  Created.
 */
#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"
#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) \
                find_next_one_bit(addr, size, offset)
#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
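
/*
 * These helpers expand by token pasting: on a 64-bit kernel
 * (BITS_PER_LONG == 64), leBPL_to_cpup(x) becomes le64_to_cpup(x) and
 * uintBPL_t becomes __le64, so the on-disk little-endian bitmap below is
 * scanned one native word at a time on any word size.
 */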
static inline int find_next_one_bit(void *addr, int size, int offset)
{
        uintBPL_t *p = ((uintBPL_t *)addr) + (offset / BITS_PER_LONG);
        int result = offset & ~(BITS_PER_LONG - 1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= (BITS_PER_LONG - 1);
        if (offset) {
                tmp = leBPL_to_cpup(p++);
                tmp &= ~0UL << offset;
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG - 1)) {
                tmp = leBPL_to_cpup(p++);
                if (tmp)
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = leBPL_to_cpup(p);
found_first:
        tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
        return result + ffz(~tmp);
}
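
/*
 * Illustrative example: if the first little-endian word at addr is
 * 0x100, find_next_one_bit(addr, 64, 0) returns 8, the index of the
 * lowest set bit at or above the offset.  Callers treat any return
 * value >= size as "no set bit found".
 */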
#define find_first_one_bit(addr, size) \
        find_next_one_bit((addr), (size), 0)
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int retval = 0;
        kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

        bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
        if (!bh)
                retval = -EIO;

        bitmap->s_block_bitmap[bitmap_nr] = bh;
        return retval;
}
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%d) > nr_groups (%d)\n", block_group,
                          nr_groups);
        }

        if (bitmap->s_block_bitmap[block_group]) {
                return block_group;
        } else {
                retval = read_block_bitmap(sb, bitmap, block_group,
                                           block_group);
                if (retval < 0)
                        return retval;
                return block_group;
        }
}
static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot;

        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;

        if (!bitmap->s_block_bitmap[slot])
                return -EIO;

        return slot;
}
static bool udf_add_free_space(struct udf_sb_info *sbi,
                               u16 partition, u32 cnt)
{
        struct logicalVolIntegrityDesc *lvid;

        if (sbi->s_lvid_bh == NULL)
                return false;

        lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
        lvid->freeSpaceTable[partition] = cpu_to_le32(le32_to_cpu(
                        lvid->freeSpaceTable[partition]) + cnt);
        return true;
}
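
/*
 * Note that callers also pass negative deltas (e.g. -1 when a block is
 * allocated); cnt is u32, so the decrement works out through modular
 * arithmetic on the 32-bit free-space counter in the LVID.
 */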
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct inode *inode,
                                   struct udf_bitmap *bitmap,
                                   kernel_lb_addr bloc, uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        if (bloc.logicalBlockNum < 0 ||
            (bloc.logicalBlockNum + count) >
                sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
                          sbi->s_partmaps[bloc.partitionReferenceNum].
                                                        s_partition_len);
                goto error_return;
        }

        block = bloc.logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

do_more:
        overflow = 0;
        block_group = block >> (sb->s_blocksize_bits + 3);
        bit = block % (sb->s_blocksize << 3);

        /*
         * Check to see if we are freeing blocks across a group boundary.
         */
        if (bit + count > (sb->s_blocksize << 3)) {
                overflow = bit + count - (sb->s_blocksize << 3);
                count -= overflow;
        }
        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;

        bh = bitmap->s_block_bitmap[bitmap_nr];
        for (i = 0; i < count; i++) {
                if (udf_set_bit(bit + i, bh->b_data)) {
                        udf_debug("bit %ld already set\n", bit + i);
                        udf_debug("byte=%2x\n",
                                  ((char *)bh->b_data)[(bit + i) >> 3]);
                } else {
                        if (inode)
                                DQUOT_FREE_BLOCK(inode, 1);
                        udf_add_free_space(sbi, sbi->s_partition, 1);
                }
        }
        mark_buffer_dirty(bh);
        if (overflow) {
                block += count;
                count = overflow;
                goto do_more;
        }

error_return:
        sb->s_dirt = 1;
        if (sbi->s_lvid_bh)
                mark_buffer_dirty(sbi->s_lvid_bh);
        mutex_unlock(&sbi->s_alloc_mutex);
}
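
/*
 * Worked example (illustrative): with a 2048-byte block size, one
 * bitmap block covers 2048 * 8 = 16384 blocks, so for block number B
 * the group is B >> (11 + 3) and the bit index is B % 16384.  The
 * (sizeof(struct spaceBitmapDesc) << 3) term skips the bits occupied
 * by the space bitmap descriptor header at the start of the bitmap.
 */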
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct inode *inode,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group, group_start;
        int nr_groups, bitmap_nr;
        struct buffer_head *bh;
        __u32 part_len;

        mutex_lock(&sbi->s_alloc_mutex);
        part_len = sbi->s_partmaps[partition].s_partition_len;
        if (first_block < 0 || first_block >= part_len)
                goto out;

        if (first_block + block_count > part_len)
                block_count = part_len - first_block;

        do {
                nr_groups = udf_compute_nr_groups(sb, partition);
                block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
                block_group = block >> (sb->s_blocksize_bits + 3);
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto out;
                bh = bitmap->s_block_bitmap[bitmap_nr];

                bit = block % (sb->s_blocksize << 3);

                while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                        if (!udf_test_bit(bit, bh->b_data))
                                goto out;
                        else if (DQUOT_PREALLOC_BLOCK(inode, 1))
                                goto out;
                        else if (!udf_clear_bit(bit, bh->b_data)) {
                                udf_debug("bit already cleared for block %d\n",
                                          bit);
                                DQUOT_FREE_BLOCK(inode, 1);
                                goto out;
                        }
                        block_count--;
                        alloc_count++;
                        bit++;
                        block++;
                }
                mark_buffer_dirty(bh);
        } while (block_count > 0);

out:
        if (udf_add_free_space(sbi, partition, -alloc_count))
                mark_buffer_dirty(sbi->s_lvid_bh);
        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}
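
/*
 * Contract sketch: the return value is the number of blocks actually
 * reserved starting at first_block, which may be fewer than block_count
 * when the run of free bits ends early or quota is exhausted; 0 means
 * nothing could be preallocated.
 */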
static int udf_bitmap_new_block(struct super_block *sb,
                                struct inode *inode,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0, block, block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        int newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);
                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit((char *)bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                                            group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        i = 0;
        while (i < 7 && bit > (group_start << 3) &&
               udf_test_bit(bit - 1, bh->b_data)) {
                ++i;
                --bit;
        }

got_block:

        /*
         * Check quota for allocation of this block.
         */
        if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                *err = -EDQUOT;
                return 0;
        }

        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        if (udf_add_free_space(sbi, partition, -1))
                mark_buffer_dirty(sbi->s_lvid_bh);

        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}
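
/*
 * Search order above, in brief: try the goal bit itself, then the rest
 * of the goal's 64-bit word, then the remainder of the goal group (a
 * byte scan with memscan followed by a bit scan); failing that, sweep
 * the other groups, byte-granular on the first pass and bit-granular
 * on the second.  search_back then backs up over as many as 7 preceding
 * free bits so allocations tend to start at the front of a free run.
 */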
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *inode,
                                  struct inode *table,
                                  kernel_lb_addr bloc, uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t start, end;
        uint32_t elen;
        kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        int i;
        struct udf_inode_info *iinfo;

        mutex_lock(&sbi->s_alloc_mutex);
        if (bloc.logicalBlockNum < 0 ||
            (bloc.logicalBlockNum + count) >
                sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
                udf_debug("%d < %d || %d + %d > %d\n",
                          bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
                          sbi->s_partmaps[bloc.partitionReferenceNum].
                                                        s_partition_len);
                goto error_return;
        }

        iinfo = UDF_I(table);
        /* We do this up front - There are some error conditions that
           could occur, but... oh well */
        if (inode)
                DQUOT_FREE_BLOCK(inode, count);
        if (udf_add_free_space(sbi, sbi->s_partition, count))
                mark_buffer_dirty(sbi->s_lvid_bh);

        start = bloc.logicalBlockNum + offset;
        end = bloc.logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = iinfo->i_location;
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (((eloc.logicalBlockNum +
                        (elen >> sb->s_blocksize_bits)) == start)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                        sb->s_blocksize_bits);
                                count -= tmp;
                                start += tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                                        (count << sb->s_blocksize_bits)) {
                                uint32_t tmp = ((0x3FFFFFFF - elen) >>
                                                sb->s_blocksize_bits);
                                count -= tmp;
                                end -= tmp;
                                eloc.logicalBlockNum -= tmp;
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (elen +
                                        (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        i = -1;
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else {
                        oepos.offset = epos.offset;
                }
        }

        if (count) {
                /*
                 * NOTE: we CANNOT use udf_add_aext here, as it can try to
                 * allocate a new block, and since we hold the super block
                 * lock already very bad things would happen :)
                 *
                 * We copy the behavior of udf_add_aext, but instead of
                 * trying to allocate a new block close to the existing one,
                 * we just steal a block from the extent we are trying to add.
                 *
                 * It would be nice if the blocks were close together, but it
                 * isn't required.
                 */

                int adsize;
                short_ad *sad = NULL;
                long_ad *lad = NULL;
                struct allocExtDesc *aed;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED |
                        (count << sb->s_blocksize_bits);

                if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                        adsize = sizeof(short_ad);
                else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                        adsize = sizeof(long_ad);
                else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        char *sptr, *dptr;
                        int loffset;

                        brelse(oepos.bh);
                        oepos = epos;

                        /* Steal a block from the extent being freed */
                        epos.block.logicalBlockNum = eloc.logicalBlockNum;
                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;

                        epos.bh = udf_tread(sb,
                                        udf_get_lb_pblock(sb, epos.block, 0));
                        if (!epos.bh) {
                                brelse(oepos.bh);
                                goto error_return;
                        }
                        aed = (struct allocExtDesc *)(epos.bh->b_data);
                        aed->previousAllocExtLocation =
                                cpu_to_le32(oepos.block.logicalBlockNum);
                        if (epos.offset + adsize > sb->s_blocksize) {
                                loffset = epos.offset;
                                aed->lengthAllocDescs = cpu_to_le32(adsize);
                                sptr = iinfo->i_ext.i_data + epos.offset
                                                                - adsize;
                                dptr = epos.bh->b_data +
                                        sizeof(struct allocExtDesc);
                                memcpy(dptr, sptr, adsize);
                                epos.offset = sizeof(struct allocExtDesc) +
                                                adsize;
                        } else {
                                loffset = epos.offset + adsize;
                                aed->lengthAllocDescs = cpu_to_le32(0);
                                if (oepos.bh) {
                                        sptr = oepos.bh->b_data + epos.offset;
                                        aed = (struct allocExtDesc *)
                                                oepos.bh->b_data;
                                        aed->lengthAllocDescs =
                                                cpu_to_le32(le32_to_cpu(
                                                    aed->lengthAllocDescs) +
                                                        adsize);
                                } else {
                                        sptr = iinfo->i_ext.i_data +
                                                                epos.offset;
                                        iinfo->i_lenAlloc += adsize;
                                        mark_inode_dirty(table);
                                }
                                epos.offset = sizeof(struct allocExtDesc);
                        }
                        if (sbi->s_udfrev >= 0x0200)
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            3, 1, epos.block.logicalBlockNum,
                                            sizeof(tag));
                        else
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
                                            2, 1, epos.block.logicalBlockNum,
                                            sizeof(tag));

                        switch (iinfo->i_alloc_type) {
                        case ICBTAG_FLAG_AD_SHORT:
                                sad = (short_ad *)sptr;
                                sad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        sb->s_blocksize);
                                sad->extPosition =
                                        cpu_to_le32(epos.block.logicalBlockNum);
                                break;
                        case ICBTAG_FLAG_AD_LONG:
                                lad = (long_ad *)sptr;
                                lad->extLength = cpu_to_le32(
                                        EXT_NEXT_EXTENT_ALLOCDECS |
                                        sb->s_blocksize);
                                lad->extLocation =
                                        cpu_to_lelb(epos.block);
                                break;
                        }
                        if (oepos.bh) {
                                udf_update_tag(oepos.bh->b_data, loffset);
                                mark_buffer_dirty(oepos.bh);
                        } else {
                                mark_inode_dirty(table);
                        }
                }

                /* It's possible that stealing the block emptied the extent */
                if (elen) {
                        udf_write_aext(table, &epos, eloc, elen, 1);

                        if (!epos.bh) {
                                iinfo->i_lenAlloc += adsize;
                                mark_inode_dirty(table);
                        } else {
                                aed = (struct allocExtDesc *)epos.bh->b_data;
                                aed->lengthAllocDescs =
                                        cpu_to_le32(le32_to_cpu(
                                            aed->lengthAllocDescs) + adsize);
                                udf_update_tag(epos.bh->b_data, epos.offset);
                                mark_buffer_dirty(epos.bh);
                        }
                }
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
}
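
/*
 * Extent bookkeeping above, in short: a freed run [start, end] is first
 * merged into an existing free extent that ends at start or begins at
 * end + 1; extent lengths are capped at 0x3FFFFFFF bytes (the 30-bit
 * on-disk length field), hence the splitting arithmetic.  Only when no
 * merge is possible is a new descriptor appended, stealing one block
 * from the freed run itself if the current descriptor block is full.
 */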
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *inode,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;
        struct udf_inode_info *iinfo;

        if (first_block < 0 ||
            first_block >= sbi->s_partmaps[partition].s_partition_len)
                return 0;

        iinfo = UDF_I(table);
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%d, elen=%d, first_block=%d\n",
                          eloc.logicalBlockNum, elen, first_block);
                ; /* empty loop body */
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (inode && DQUOT_PREALLOC_BLOCK(inode,
                        alloc_count > block_count ? block_count : alloc_count))
                        alloc_count = 0;
                else if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, eloc,
                                        (etype << 30) | elen, 1);
                } else
                        udf_delete_aext(table, epos, eloc,
                                        (etype << 30) | elen);
        }

        brelse(epos.bh);

        if (alloc_count && udf_add_free_space(sbi, partition, -alloc_count)) {
                mark_buffer_dirty(sbi->s_lvid_bh);
                sb->s_dirt = 1;
        }
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}
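
/*
 * Unlike the bitmap variant, table-based preallocation only succeeds
 * when first_block is exactly the start of a free extent; it then
 * carves up to block_count blocks off the front of that extent, or
 * deletes the extent outright when it is consumed entirely.
 */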
static int udf_table_new_block(struct super_block *sb,
                               struct inode *inode,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        uint32_t newblock = 0, adsize;
        uint32_t elen, goal_elen = 0;
        kernel_lb_addr eloc, uninitialized_var(goal_eloc);
        struct extent_position epos, goal_epos;
        int8_t etype;
        struct udf_inode_info *iinfo = UDF_I(table);

        *err = -ENOSPC;

        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(short_ad);
        else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
                goal = 0;

        /* We search for the closest matching block to goal. If we find
           an exact hit, we stop. Otherwise we keep going till we run out
           of extents. We store the buffer_head, bloc, and extoffset
           of the current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = iinfo->i_location;
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                        (elen >> sb->s_blocksize_bits);
                } else {
                        nspread = eloc.logicalBlockNum - goal;
                }

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                *err = -EDQUOT;
                return 0;
        }

        if (goal_elen)
                udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
        brelse(goal_epos.bh);

        if (udf_add_free_space(sbi, partition, -1))
                mark_buffer_dirty(sbi->s_lvid_bh);

        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}
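
/*
 * The spread metric is the distance from goal to a candidate extent
 * (0 when goal falls inside one), so the scan keeps the closest extent
 * seen.  Allocating only the first block of that extent means it either
 * shrinks in place or is deleted; an extent never has to be split.
 */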
inline void udf_free_blocks(struct super_block *sb,
                            struct inode *inode,
                            kernel_lb_addr bloc, uint32_t offset,
                            uint32_t count)
{
        uint16_t partition = bloc.partitionReferenceNum;
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
                return udf_bitmap_free_blocks(sb, inode,
                                              map->s_uspace.s_bitmap,
                                              bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
                return udf_table_free_blocks(sb, inode,
                                             map->s_uspace.s_table,
                                             bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
                return udf_bitmap_free_blocks(sb, inode,
                                              map->s_fspace.s_bitmap,
                                              bloc, offset, count);
        } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
                return udf_table_free_blocks(sb, inode,
                                             map->s_fspace.s_table,
                                             bloc, offset, count);
        } else {
                return;
        }
}
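
/*
 * Each partition map advertises exactly one free-space structure via
 * s_partition_flags, so a free lands in exactly one of the four
 * bitmap/table routines above; with none of the flags set the call is
 * silently a no-op.
 */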
inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_uspace.s_bitmap,
                                                  partition, first_block,
                                                  block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_uspace.s_table,
                                                 partition, first_block,
                                                 block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  map->s_fspace.s_bitmap,
                                                  partition, first_block,
                                                  block_count);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                return udf_table_prealloc_blocks(sb, inode,
                                                 map->s_fspace.s_table,
                                                 partition, first_block,
                                                 block_count);
        else
                return 0;
}
inline int udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

        if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
                return udf_bitmap_new_block(sb, inode,
                                            map->s_uspace.s_bitmap,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
                return udf_table_new_block(sb, inode,
                                           map->s_uspace.s_table,
                                           partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
                return udf_bitmap_new_block(sb, inode,
                                            map->s_fspace.s_bitmap,
                                            partition, goal, err);
        else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
                return udf_table_new_block(sb, inode,
                                           map->s_fspace.s_table,
                                           partition, goal, err);
        else {
                *err = -EIO;
                return 0;
        }
}
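
/*
 * Illustrative round trip under these interfaces (error handling
 * elided; bloc is a hypothetical kernel_lb_addr naming the block):
 *
 *      int err = 0;
 *      uint32_t blk = udf_new_block(sb, inode, partition, goal, &err);
 *      if (!blk)
 *              return err;
 *      ...
 *      udf_free_blocks(sb, inode, bloc, 0, 1);
 */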