/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_next_one_bit(addr, size, offset) \
		ext2_find_next_bit(addr, size, offset)
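
/*
 * Read one block of the partition's space bitmap (block @block of the
 * extent at bitmap->s_extPosition) and cache the buffer head in
 * bitmap->s_block_bitmap[bitmap_nr].  Returns 0 on success or -EIO.
 */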
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
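
/*
 * Make sure the bitmap block for @block_group is cached, reading it in
 * if necessary.  Returns the cache slot (the group number itself) or a
 * negative error.  An out-of-range group is only logged, not rejected.
 */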
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) > nr_groups (%d)\n", block_group,
			  nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}
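
/*
 * Wrapper around __load_block_bitmap() that additionally returns -EIO
 * when the cached buffer for the group is missing.
 */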
static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
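
/*
 * Adjust the free-space count for @partition in the cached Logical Volume
 * Integrity Descriptor by @cnt blocks (callers pass a negative count to
 * subtract) and mark the LVID as updated.
 */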
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}
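
/*
 * Free @count blocks starting at @bloc + @offset by setting the
 * corresponding bits in the space bitmap, releasing quota and crediting
 * the LVID free-space count as we go.  Runs that cross a bitmap-group
 * boundary are handled one group at a time.
 */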
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
			  count, partmap->s_partition_len);
		goto error_return;
	}

	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %ld already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((char *)bh->b_data)[(bit + i) >> 3]);
			} else {
				if (inode)
					dquot_free_block(inode, 1);
				udf_add_free_space(sb, sbi->s_partition, 1);
			}
		}
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}
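
/*
 * Try to grab up to @block_count free blocks starting exactly at
 * @first_block.  Returns the number of blocks actually allocated.  As
 * written, the scan stops at the first in-use bit, so a run that crosses
 * a bitmap-group boundary may be only partially preallocated.
 */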
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_test_bit(bit, bh->b_data))
				goto out;
			else if (dquot_prealloc_block(inode, 1))
				goto out;
			else if (!udf_clear_bit(bit, bh->b_data)) {
				udf_debug("bit already cleared for block %d\n", bit);
				dquot_free_block(inode, 1);
				goto out;
			}
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
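
/*
 * Allocate a single block, preferring @goal: try the goal bit itself,
 * then a one-bit search up to the next 64-bit boundary, then the rest
 * of the goal's group, and finally all other groups.  A short backward
 * search moves the result toward the start of a free run.  Returns the
 * new block number with *err set to 0, or 0 on failure.
 */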
static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode) {
		int ret = dquot_alloc_block(inode, 1);

		if (ret) {
			mutex_unlock(&sbi->s_alloc_mutex);
			*err = ret;
			return 0;
		}
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
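
/*
 * Return @count blocks at @bloc + @offset to the unallocated-space table
 * @table.  The extent list is scanned for an extent that ends just before
 * or starts just after the freed range so it can be grown in place; if no
 * merge is possible a new extent is appended, stealing one of the freed
 * blocks for a fresh allocation extent descriptor when the current
 * descriptor block is full.
 */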
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	/* We do this up front - There are some error conditions that
	   could occur, but.. oh well */
	if (inode)
		dquot_free_block(inode, count);
	udf_add_free_space(sb, sbi->s_partition, count);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;
		struct short_ad *sad = NULL;
		struct long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			unsigned char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			epos.bh = udf_tread(sb,
					udf_get_lb_pblock(sb, &epos.block, 0));
			if (!epos.bh) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = iinfo->i_ext.i_data + epos.offset
								- adsize;
				dptr = epos.bh->b_data +
					sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) +
						adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				if (oepos.bh) {
					sptr = oepos.bh->b_data + epos.offset;
					aed = (struct allocExtDesc *)
						oepos.bh->b_data;
					le32_add_cpu(&aed->lengthAllocDescs,
							adsize);
				} else {
					sptr = iinfo->i_ext.i_data +
								epos.offset;
					iinfo->i_lenAlloc += adsize;
					mark_inode_dirty(table);
				}
				epos.offset = sizeof(struct allocExtDesc);
			}
			if (sbi->s_udfrev >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    3, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    2, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));

			switch (iinfo->i_alloc_type) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (struct short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (struct long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation =
					cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen) {
			udf_write_aext(table, &epos, &eloc, elen, 1);

			if (!epos.bh) {
				iinfo->i_lenAlloc += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				le32_add_cpu(&aed->lengthAllocDescs, adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
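
/*
 * Grab up to @block_count blocks from the table extent that begins
 * exactly at @first_block, shrinking the extent or deleting it when it
 * is consumed entirely.  Returns the number of blocks allocated.
 */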
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && dquot_prealloc_block(inode,
			alloc_count > block_count ? block_count : alloc_count))
			alloc_count = 0;
		else if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
					(etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
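
/*
 * Allocate one block from the free-space table extent closest to @goal.
 * The block is always taken from the front of the chosen extent, so
 * extents only ever shrink or disappear and never need to be split.
 */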
static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
					(elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;
	if (inode) {
		*err = dquot_alloc_block(inode, 1);
		if (*err) {
			brelse(goal_epos.bh);
			mutex_unlock(&sbi->s_alloc_mutex);
			return 0;
		}
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
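
/*
 * Entry point for freeing blocks: dispatch to the bitmap or table
 * implementation according to the partition's free-space flags.
 */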
void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
				      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
				      bloc, offset, count);
	}
}
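
/*
 * Entry point for preallocation: dispatch by partition flags and return
 * the number of blocks obtained (0 if the partition type is unsupported).
 */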
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_uspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_uspace.s_table,
						 partition, first_block,
						 block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_fspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_fspace.s_table,
						 partition, first_block,
						 block_count);
	else
		return 0;
}
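
/*
 * Entry point for single-block allocation: dispatch by partition flags.
 * Returns the new block number, or 0 with *err set on failure.
 */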
inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_uspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_uspace.s_table,
					   partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_fspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_fspace.s_table,
					   partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}
}