/*
 * balloc.c
 *
 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *	ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1999-2001 Ben Fennema
 * (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *	02/24/99 blf	Created.
 */
#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"
#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
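
/*
 * Illustrative expansion of the helpers above (assuming a 64-bit build,
 * i.e. BITS_PER_LONG == 64):
 *
 *	leBPL_to_cpup(p) -> leNUM_to_cpup(64, p) -> xleNUM_to_cpup(64, p)
 *			 -> le64_to_cpup(p)
 *	uintBPL_t	 -> uint(64) -> xuint(64) -> __le64
 *
 * The extra level of indirection is what forces the preprocessor to
 * expand BITS_PER_LONG to its numeric value before the ## paste happens.
 */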
static inline int find_next_one_bit(void *addr, int size, int offset)
{
	uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset) {
		/* Partial first word: mask off the bits below 'offset' */
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	/* Scan whole words */
	while (size & ~(BITS_PER_LONG - 1)) {
		if ((tmp = leBPL_to_cpup(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	/* Partial last word */
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
	return result + ffz(~tmp);
}
#define find_first_one_bit(addr, size)\
	find_next_one_bit((addr), (size), 0)
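
/*
 * Usage sketch (illustrative only): in a UDF space bitmap a set bit means
 * the block is free, so finding the first free block in a loaded bitmap
 * block is simply
 *
 *	bit = udf_find_first_one_bit(bh->b_data, sb->s_blocksize << 3);
 *
 * where sb->s_blocksize << 3 is the number of bits in one bitmap block.
 */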
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups)
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
			  nr_groups);

	if (bitmap->s_block_bitmap[block_group])
		return block_group;

	retval = read_block_bitmap(sb, bitmap, block_group, block_group);
	if (retval < 0)
		return retval;

	return block_group;
}
static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	/* Bitmap blocks are cached in bitmap->s_block_bitmap, one slot per
	   group; __load_block_bitmap() reads a block in on first use. */
	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
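
/*
 * Free 'count' blocks starting at bloc+offset by setting their bits (a set
 * bit marks a free block).  Block numbers are biased by the size in bits
 * of the spaceBitmapDesc header stored at the front of the bitmap.
 */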
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   kernel_lb_addr bloc, uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb,
							    bloc.partitionReferenceNum)) {
		udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0,
			  bloc.logicalBlockNum, count, UDF_SB_PARTLEN(sb,
								      bloc.partitionReferenceNum));
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);
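
	/*
	 * Worked example (assuming 2048-byte blocks): one bitmap block holds
	 * 2048 << 3 = 16384 bits, so for block 20000 we get
	 * block_group = 20000 >> 14 = 1 and bit = 20000 % 16384 = 3616.
	 */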
	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3)) {
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i = 0; i < count; i++) {
		if (udf_set_bit(bit + i, bh->b_data)) {
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n",
				  ((char *)bh->b_data)[(bit + i) >> 3]);
		} else {
			if (inode)
				DQUOT_FREE_BLOCK(inode, 1);
			if (UDF_SB_LVIDBH(sb)) {
				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
						freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
			}
		}
	}
	mark_buffer_dirty(bh);
	if (overflow) {
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	sb->s_dirt = 1;
	if (UDF_SB_LVIDBH(sb))
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	mutex_lock(&sbi->s_alloc_mutex);
	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		goto out;

	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
		     (sizeof(struct spaceBitmapDesc) << 3) +
		     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto out;
	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0) {
		if (!udf_test_bit(bit, bh->b_data)) {
			goto out;	/* block already in use */
		} else if (DQUOT_PREALLOC_BLOCK(inode, 1)) {
			goto out;	/* over quota */
		} else if (!udf_clear_bit(bit, bh->b_data)) {
			udf_debug("bit already cleared for block %d\n", bit);
			DQUOT_FREE_BLOCK(inode, 1);
			goto out;
		}
		block_count--;
		alloc_count++;
		bit++;
		block++;
	}
	mark_buffer_dirty(bh);
	if (block_count > 0)
		goto repeat;
out:
	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
				freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
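
/*
 * Allocate one block near 'goal'.  The search order is roughly: the goal
 * bit itself, a set bit in the 64-bit window above the goal, a fully free
 * (0xFF) byte located with memscan(), any set bit in the goal's group, and
 * finally the remaining groups.  search_back then backs up over as many as
 * 7 preceding free bits so the allocation starts near the head of a run.
 */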
static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	/* No luck in the goal group; try the other groups (twice: first with
	   memscan() for a fully free byte, then bit by bit). */
	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	/* Back up over up to 7 preceding free bits so the allocation starts
	   at the beginning of a free run. */
	for (i = 0;
	     i < 7 && bit > (group_start << 3) &&
	     udf_test_bit(bit - 1, bh->b_data); i++, bit--)
		;

got_block:
	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
				freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	*err = 0;
	mutex_unlock(&sbi->s_alloc_mutex);
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  kernel_lb_addr bloc, uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb,
							    bloc.partitionReferenceNum)) {
		udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0,
			  bloc.logicalBlockNum, count, UDF_SB_PARTLEN(sb,
								      bloc.partitionReferenceNum));
		goto error_return;
	}

	/* We do this up front - there are some error conditions that could
	   occur, but.. oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
				freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = UDF_I_LOCATION(table);
	epos.bh = oepos.bh = NULL;
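
	/*
	 * Extent lengths are stored as (etype << 30) | byte_length, so an
	 * extent can describe at most 0x3FFFFFFF bytes.  A merge that would
	 * overflow caps the extent at 0x40000000 - sb->s_blocksize, the
	 * largest block-aligned length that still fits in 30 bits.
	 */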
	while (count && (etype =
			 udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if ((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) ==
		    start) {
			/* Freed range starts right after this extent:
			   extend it forward. */
			if ((0x3FFFFFFF - elen) <
			    (count << sb->s_blocksize_bits)) {
				count -= ((0x3FFFFFFF -
					   elen) >> sb->s_blocksize_bits);
				start += ((0x3FFFFFFF -
					   elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 -
							sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			/* Freed range ends right before this extent:
			   extend it backward. */
			if ((0x3FFFFFFF - elen) <
			    (count << sb->s_blocksize_bits)) {
				count -= ((0x3FFFFFFF -
					   elen) >> sb->s_blocksize_bits);
				end -= ((0x3FFFFFFF -
					 elen) >> sb->s_blocksize_bits);
				eloc.logicalBlockNum -=
					((0x3FFFFFFF -
					  elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 -
							sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}
	if (count) {
		/* NOTE: we CANNOT use udf_add_aext here, as it can try to
		   allocate a new block, and since we hold the super block
		   lock already very bad things would happen :)

		   We copy the behavior of udf_add_aext, but instead of
		   trying to allocate a new block close to the existing one,
		   we just steal a block from the extent we are trying to add.

		   It would be nice if the blocks were close together, but it
		   isn't required. */
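
		/*
		 * Illustrative before/after of the steal (numbers made up):
		 * freeing blocks 100..109 when the allocation descriptors
		 * are full shrinks the new free extent to 101..109, and
		 * block 100 is reused to hold the new allocation extent
		 * descriptor (AED) instead of being recorded as free.
		 */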
		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(short_ad);
		else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			if (!(epos.bh = udf_tread(sb,
						  udf_get_lb_pblock(sb,
								    epos.block,
								    0)))) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = UDF_I_DATA(inode) + epos.offset -
					udf_file_entry_alloc_offset(inode) +
					UDF_I_LENEATTR(inode) - adsize;
				dptr = epos.bh->b_data +
					sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) +
					adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				sptr = oepos.bh->b_data + epos.offset;
				epos.offset = sizeof(struct allocExtDesc);

				if (oepos.bh) {
					aed = (struct allocExtDesc *)oepos.bh->
						b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu
							(aed->lengthAllocDescs)
							+ adsize);
				} else {
					UDF_I_LENALLOC(table) += adsize;
					mark_inode_dirty(table);
				}
			}
			if (UDF_SB_UDFREV(sb) >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3,
					    1, epos.block.logicalBlockNum,
					    sizeof(tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2,
					    1, epos.block.logicalBlockNum,
					    sizeof(tag));
			switch (UDF_I_ALLOCTYPE(table)) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (short_ad *) sptr;
				sad->extLength =
					cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
						    sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (long_ad *) sptr;
				lad->extLength =
					cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
						    sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}

		if (elen) {	/* It's possible that stealing the block
				   emptied the extent */
			udf_write_aext(table, &epos, eloc, elen, 1);

			if (!epos.bh) {
				UDF_I_LENALLOC(table) += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu
						(aed->lengthAllocDescs) +
						adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		return 0;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	/* Walk the extents until we hit the one starting at first_block. */
	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode,
						  alloc_count > block_count ?
						  block_count : alloc_count)) {
			alloc_count = 0;
		} else if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc,
				       (etype << 30) | elen, 1);
		} else {
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
		}
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count && UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
				freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
		sb->s_dirt = 1;
	}
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;

	*err = -ENOSPC;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	/* We search for the closest matching block to goal. If we find an
	   exact hit, we stop. Otherwise we keep going till we run out of
	   extents. We store the buffer_head, bloc, and extoffset of the
	   current closest match and use that when we are done.
	 */
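
	/*
	 * Example of the distance metric below (illustrative numbers): with
	 * goal = 500, an extent covering blocks [480..520) gives nspread = 0
	 * (goal inside the extent), an extent [100..200) gives
	 * nspread = 500 - 100 - 100 = 300, and an extent starting at 600
	 * gives nspread = 100.  The smallest spread seen so far is kept.
	 */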
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
			    (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
				freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
inline void udf_free_blocks(struct super_block *sb,
			    struct inode *inode,
			    kernel_lb_addr bloc, uint32_t offset,
			    uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      UDF_SB_PARTMAPS(sb)[partition].
					      s_uspace.s_bitmap, bloc, offset,
					      count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     UDF_SB_PARTMAPS(sb)[partition].
					     s_uspace.s_table, bloc, offset,
					     count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      UDF_SB_PARTMAPS(sb)[partition].
					      s_fspace.s_bitmap, bloc, offset,
					      count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     UDF_SB_PARTMAPS(sb)[partition].
					     s_fspace.s_table, bloc, offset,
					     count);
	} else {
		return;
	}
}
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_prealloc_blocks(sb, inode,
						  UDF_SB_PARTMAPS(sb)
						  [partition].s_uspace.s_bitmap,
						  partition, first_block,
						  block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_prealloc_blocks(sb, inode,
						 UDF_SB_PARTMAPS(sb)[partition].
						 s_uspace.s_table, partition,
						 first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_prealloc_blocks(sb, inode,
						  UDF_SB_PARTMAPS(sb)
						  [partition].s_fspace.s_bitmap,
						  partition, first_block,
						  block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_prealloc_blocks(sb, inode,
						 UDF_SB_PARTMAPS(sb)[partition].
						 s_fspace.s_table, partition,
						 first_block, block_count);
	} else {
		return 0;
	}
}
inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	int ret;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		ret = udf_bitmap_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].
					   s_uspace.s_bitmap, partition, goal,
					   err);
		return ret;
	} else if (UDF_SB_PARTFLAGS(sb, partition) &
		   UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].
					   s_uspace.s_table, partition, goal,
					   err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_new_block(sb, inode,
					    UDF_SB_PARTMAPS(sb)[partition].
					    s_fspace.s_bitmap, partition, goal,
					    err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].
					   s_fspace.s_table, partition, goal,
					   err);
	} else {
		*err = -EIO;
		return 0;
	}
}
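
#if 0
/*
 * Illustrative (not compiled) round trip through the dispatchers above.
 * "sb" is a mounted UDF super block, "inode" a file on it, and "partition"
 * its partition reference number; the function name is made up for this
 * sketch.  udf_new_block() returns a partition-relative block number, or 0
 * with *err set; udf_free_blocks() takes the block back as a one-block
 * extent.
 */
static void udf_balloc_example(struct super_block *sb, struct inode *inode,
			       uint16_t partition)
{
	kernel_lb_addr loc;
	int err = 0;
	int block = udf_new_block(sb, inode, partition, 0, &err);

	if (!block)
		return;	/* err holds -ENOSPC, -EDQUOT or -EIO */

	/* Free it again as a one-block extent at offset 0 */
	loc.logicalBlockNum = block;
	loc.partitionReferenceNum = partition;
	udf_free_blocks(sb, inode, loc, 0, 1);
}
#endif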