1 /*
2 * inode.c
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
7 * COPYRIGHT
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
17 * HISTORY
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map
23 * and udf_read_inode
24 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
25 * block boundaries (which is not actually allowed)
26 * 12/20/98 added support for strategy 4096
27 * 03/07/99 rewrote udf_block_map (again)
28 * New funcs, inode_bmap, udf_next_aext
29 * 04/19/99 Support for writing device EA's for major/minor #
32 #include "udfdecl.h"
33 #include <linux/mm.h>
34 #include <linux/smp_lock.h>
35 #include <linux/module.h>
36 #include <linux/pagemap.h>
37 #include <linux/buffer_head.h>
38 #include <linux/writeback.h>
39 #include <linux/slab.h>
40 #include <linux/crc-itu-t.h>
42 #include "udf_i.h"
43 #include "udf_sb.h"
45 MODULE_AUTHOR("Ben Fennema");
46 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
47 MODULE_LICENSE("GPL");
49 #define EXTENT_MERGE_SIZE 5
51 static mode_t udf_convert_permissions(struct fileEntry *);
52 static int udf_update_inode(struct inode *, int);
53 static void udf_fill_inode(struct inode *, struct buffer_head *);
54 static int udf_alloc_i_data(struct inode *inode, size_t size);
55 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
56 sector_t *, int *);
57 static int8_t udf_insert_aext(struct inode *, struct extent_position,
58 kernel_lb_addr, uint32_t);
59 static void udf_split_extents(struct inode *, int *, int, int,
60 kernel_long_ad[EXTENT_MERGE_SIZE], int *);
61 static void udf_prealloc_extents(struct inode *, int, int,
62 kernel_long_ad[EXTENT_MERGE_SIZE], int *);
63 static void udf_merge_extents(struct inode *,
64 kernel_long_ad[EXTENT_MERGE_SIZE], int *);
65 static void udf_update_extents(struct inode *,
66 kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
67 struct extent_position *);
68 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
71 void udf_delete_inode(struct inode *inode)
73 truncate_inode_pages(&inode->i_data, 0);
75 if (is_bad_inode(inode))
76 goto no_delete;
78 inode->i_size = 0;
79 udf_truncate(inode);
80 lock_kernel();
82 udf_update_inode(inode, IS_SYNC(inode));
83 udf_free_inode(inode);
85 unlock_kernel();
86 return;
88 no_delete:
89 clear_inode(inode);
93 * If we are going to release the inode from memory, we discard preallocation and
94 * truncate the last inode extent to proper length. We could use drop_inode() but
95 * it's called under inode_lock and thus we cannot mark the inode dirty there. We
96 * use clear_inode() but we have to make sure to write the inode as it's not
97 * written automatically.
99 void udf_clear_inode(struct inode *inode)
101 struct udf_inode_info *iinfo;
102 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
103 lock_kernel();
104 /* Discard preallocation for directories, symlinks, etc. */
105 udf_discard_prealloc(inode);
106 udf_truncate_tail_extent(inode);
107 unlock_kernel();
108 write_inode_now(inode, 0);
110 iinfo = UDF_I(inode);
111 kfree(iinfo->i_ext.i_data);
112 iinfo->i_ext.i_data = NULL;
115 static int udf_writepage(struct page *page, struct writeback_control *wbc)
117 return block_write_full_page(page, udf_get_block, wbc);
120 static int udf_readpage(struct file *file, struct page *page)
122 return block_read_full_page(page, udf_get_block);
125 static int udf_write_begin(struct file *file, struct address_space *mapping,
126 loff_t pos, unsigned len, unsigned flags,
127 struct page **pagep, void **fsdata)
129 *pagep = NULL;
130 return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
131 udf_get_block);
134 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
136 return generic_block_bmap(mapping, block, udf_get_block);
139 const struct address_space_operations udf_aops = {
140 .readpage = udf_readpage,
141 .writepage = udf_writepage,
142 .sync_page = block_sync_page,
143 .write_begin = udf_write_begin,
144 .write_end = generic_write_end,
145 .bmap = udf_bmap,
148 void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
150 struct page *page;
151 char *kaddr;
152 struct udf_inode_info *iinfo = UDF_I(inode);
153 struct writeback_control udf_wbc = {
154 .sync_mode = WB_SYNC_NONE,
155 .nr_to_write = 1,
158 /* from now on we have normal address_space methods */
159 inode->i_data.a_ops = &udf_aops;
161 if (!iinfo->i_lenAlloc) {
162 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
163 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
164 else
165 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
166 mark_inode_dirty(inode);
167 return;
170 page = grab_cache_page(inode->i_mapping, 0);
171 BUG_ON(!PageLocked(page));
173 if (!PageUptodate(page)) {
174 kaddr = kmap(page);
175 memset(kaddr + iinfo->i_lenAlloc, 0x00,
176 PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
177 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
178 iinfo->i_lenAlloc);
179 flush_dcache_page(page);
180 SetPageUptodate(page);
181 kunmap(page);
183 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
184 iinfo->i_lenAlloc);
185 iinfo->i_lenAlloc = 0;
186 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
187 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
188 else
189 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
191 inode->i_data.a_ops->writepage(page, &udf_wbc);
192 page_cache_release(page);
194 mark_inode_dirty(inode);
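/*
 * Sketch of the conversion above: a small file stored "in ICB" keeps its
 * data inside the file entry block itself, directly after the extended
 * attributes.  To expand it, that data is copied into page 0 of the page
 * cache, the in-ICB area is cleared, the allocation type is switched to
 * short/long allocation descriptors, and the page is written out so the
 * data ends up in a real extent.
 */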
197 struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
198 int *err)
200 int newblock;
201 struct buffer_head *dbh = NULL;
202 kernel_lb_addr eloc;
203 uint32_t elen;
204 uint8_t alloctype;
205 struct extent_position epos;
207 struct udf_fileident_bh sfibh, dfibh;
208 loff_t f_pos = udf_ext0_offset(inode);
209 int size = udf_ext0_offset(inode) + inode->i_size;
210 struct fileIdentDesc cfi, *sfi, *dfi;
211 struct udf_inode_info *iinfo = UDF_I(inode);
213 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
214 alloctype = ICBTAG_FLAG_AD_SHORT;
215 else
216 alloctype = ICBTAG_FLAG_AD_LONG;
218 if (!inode->i_size) {
219 iinfo->i_alloc_type = alloctype;
220 mark_inode_dirty(inode);
221 return NULL;
224 /* alloc block, and copy data to it */
225 *block = udf_new_block(inode->i_sb, inode,
226 iinfo->i_location.partitionReferenceNum,
227 iinfo->i_location.logicalBlockNum, err);
228 if (!(*block))
229 return NULL;
230 newblock = udf_get_pblock(inode->i_sb, *block,
231 iinfo->i_location.partitionReferenceNum,
233 if (!newblock)
234 return NULL;
235 dbh = udf_tgetblk(inode->i_sb, newblock);
236 if (!dbh)
237 return NULL;
238 lock_buffer(dbh);
239 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
240 set_buffer_uptodate(dbh);
241 unlock_buffer(dbh);
242 mark_buffer_dirty_inode(dbh, inode);
244 sfibh.soffset = sfibh.eoffset =
245 f_pos & (inode->i_sb->s_blocksize - 1);
246 sfibh.sbh = sfibh.ebh = NULL;
247 dfibh.soffset = dfibh.eoffset = 0;
248 dfibh.sbh = dfibh.ebh = dbh;
249 while (f_pos < size) {
250 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
251 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
252 NULL, NULL, NULL);
253 if (!sfi) {
254 brelse(dbh);
255 return NULL;
257 iinfo->i_alloc_type = alloctype;
258 sfi->descTag.tagLocation = cpu_to_le32(*block);
259 dfibh.soffset = dfibh.eoffset;
260 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
261 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
262 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
263 sfi->fileIdent +
264 le16_to_cpu(sfi->lengthOfImpUse))) {
265 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
266 brelse(dbh);
267 return NULL;
270 mark_buffer_dirty_inode(dbh, inode);
272 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
273 iinfo->i_lenAlloc);
274 iinfo->i_lenAlloc = 0;
275 eloc.logicalBlockNum = *block;
276 eloc.partitionReferenceNum =
277 iinfo->i_location.partitionReferenceNum;
278 elen = inode->i_sb->s_blocksize;
279 iinfo->i_lenExtents = elen;
280 epos.bh = NULL;
281 epos.block = iinfo->i_location;
282 epos.offset = udf_file_entry_alloc_offset(inode);
283 udf_add_aext(inode, &epos, eloc, elen, 0);
284 /* UniqueID stuff */
286 brelse(epos.bh);
287 mark_inode_dirty(inode);
288 return dbh;
291 static int udf_get_block(struct inode *inode, sector_t block,
292 struct buffer_head *bh_result, int create)
294 int err, new;
295 struct buffer_head *bh;
296 sector_t phys = 0;
297 struct udf_inode_info *iinfo;
299 if (!create) {
300 phys = udf_block_map(inode, block);
301 if (phys)
302 map_bh(bh_result, inode->i_sb, phys);
303 return 0;
306 err = -EIO;
307 new = 0;
308 bh = NULL;
310 lock_kernel();
312 iinfo = UDF_I(inode);
313 if (block == iinfo->i_next_alloc_block + 1) {
314 iinfo->i_next_alloc_block++;
315 iinfo->i_next_alloc_goal++;
318 err = 0;
320 bh = inode_getblk(inode, block, &err, &phys, &new);
321 BUG_ON(bh);
322 if (err)
323 goto abort;
324 BUG_ON(!phys);
326 if (new)
327 set_buffer_new(bh_result);
328 map_bh(bh_result, inode->i_sb, phys);
330 abort:
331 unlock_kernel();
332 return err;
335 static struct buffer_head *udf_getblk(struct inode *inode, long block,
336 int create, int *err)
338 struct buffer_head *bh;
339 struct buffer_head dummy;
341 dummy.b_state = 0;
342 dummy.b_blocknr = -1000;
343 *err = udf_get_block(inode, block, &dummy, create);
344 if (!*err && buffer_mapped(&dummy)) {
345 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
346 if (buffer_new(&dummy)) {
347 lock_buffer(bh);
348 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
349 set_buffer_uptodate(bh);
350 unlock_buffer(bh);
351 mark_buffer_dirty_inode(bh, inode);
353 return bh;
356 return NULL;
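/*
 * Usage note for the helper above: udf_get_block() is pointed at a dummy
 * buffer_head purely to learn the physical block number (and the "new"
 * flag); the real buffer is then obtained with sb_getblk().  A freshly
 * allocated block is zeroed and marked uptodate before it is handed back,
 * so callers such as udf_bread() never see stale disk contents.
 */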
359 /* Extend the file by 'blocks' blocks, return the number of extents added */
360 int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
361 kernel_long_ad *last_ext, sector_t blocks)
363 sector_t add;
364 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
365 struct super_block *sb = inode->i_sb;
366 kernel_lb_addr prealloc_loc = {};
367 int prealloc_len = 0;
368 struct udf_inode_info *iinfo;
370 /* The previous extent is fake and we should not extend by anything
371 * - there's nothing to do... */
372 if (!blocks && fake)
373 return 0;
375 iinfo = UDF_I(inode);
376 /* Round the last extent up to a multiple of block size */
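/*
 * For example, with a 2048-byte block size an extent recorded as 3000
 * bytes becomes 4096 bytes below: only the low 30 bits of extLength carry
 * the byte length and the top two bits encode the extent type, so the
 * length part is rounded up while the type bits are kept via
 * UDF_EXTENT_FLAG_MASK.
 */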
377 if (last_ext->extLength & (sb->s_blocksize - 1)) {
378 last_ext->extLength =
379 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
380 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
381 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
382 iinfo->i_lenExtents =
383 (iinfo->i_lenExtents + sb->s_blocksize - 1) &
384 ~(sb->s_blocksize - 1);
387 /* Is the last extent just preallocated blocks? */
388 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
389 EXT_NOT_RECORDED_ALLOCATED) {
390 /* Save the extent so that we can reattach it to the end */
391 prealloc_loc = last_ext->extLocation;
392 prealloc_len = last_ext->extLength;
393 /* Mark the extent as a hole */
394 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
395 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
396 last_ext->extLocation.logicalBlockNum = 0;
397 last_ext->extLocation.partitionReferenceNum = 0;
400 /* Can we merge with the previous extent? */
401 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
402 EXT_NOT_RECORDED_NOT_ALLOCATED) {
403 add = ((1 << 30) - sb->s_blocksize -
404 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
405 sb->s_blocksize_bits;
406 if (add > blocks)
407 add = blocks;
408 blocks -= add;
409 last_ext->extLength += add << sb->s_blocksize_bits;
412 if (fake) {
413 udf_add_aext(inode, last_pos, last_ext->extLocation,
414 last_ext->extLength, 1);
415 count++;
416 } else
417 udf_write_aext(inode, last_pos, last_ext->extLocation,
418 last_ext->extLength, 1);
420 /* Managed to do everything necessary? */
421 if (!blocks)
422 goto out;
424 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
425 last_ext->extLocation.logicalBlockNum = 0;
426 last_ext->extLocation.partitionReferenceNum = 0;
427 add = (1 << (30-sb->s_blocksize_bits)) - 1;
428 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
429 (add << sb->s_blocksize_bits);
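/*
 * A single allocation descriptor can describe at most 2^30 - 1 bytes of
 * extent, so the hole is carved out in chunks of
 * (1 << (30 - blocksize_bits)) - 1 blocks: with 2048-byte blocks that is
 * 524287 blocks (0x3FFFF800 bytes) per descriptor, and the loop below
 * adds as many such descriptors as the requested hole needs.
 */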
431 /* Create enough extents to cover the whole hole */
432 while (blocks > add) {
433 blocks -= add;
434 if (udf_add_aext(inode, last_pos, last_ext->extLocation,
435 last_ext->extLength, 1) == -1)
436 return -1;
437 count++;
439 if (blocks) {
440 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
441 (blocks << sb->s_blocksize_bits);
442 if (udf_add_aext(inode, last_pos, last_ext->extLocation,
443 last_ext->extLength, 1) == -1)
444 return -1;
445 count++;
448 out:
449 /* Do we have some preallocated blocks saved? */
450 if (prealloc_len) {
451 if (udf_add_aext(inode, last_pos, prealloc_loc,
452 prealloc_len, 1) == -1)
453 return -1;
454 last_ext->extLocation = prealloc_loc;
455 last_ext->extLength = prealloc_len;
456 count++;
459 /* last_pos should point to the last written extent... */
460 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
461 last_pos->offset -= sizeof(short_ad);
462 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
463 last_pos->offset -= sizeof(long_ad);
464 else
465 return -1;
467 return count;
470 static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
471 int *err, sector_t *phys, int *new)
473 static sector_t last_block;
474 struct buffer_head *result = NULL;
475 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
476 struct extent_position prev_epos, cur_epos, next_epos;
477 int count = 0, startnum = 0, endnum = 0;
478 uint32_t elen = 0, tmpelen;
479 kernel_lb_addr eloc, tmpeloc;
480 int c = 1;
481 loff_t lbcount = 0, b_off = 0;
482 uint32_t newblocknum, newblock;
483 sector_t offset = 0;
484 int8_t etype;
485 struct udf_inode_info *iinfo = UDF_I(inode);
486 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
487 int lastblock = 0;
489 prev_epos.offset = udf_file_entry_alloc_offset(inode);
490 prev_epos.block = iinfo->i_location;
491 prev_epos.bh = NULL;
492 cur_epos = next_epos = prev_epos;
493 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
495 /* find the extent which contains the block we are looking for.
496 alternate between laarr[0] and laarr[1] for locations of the
497 current extent, and the previous extent */
498 do {
499 if (prev_epos.bh != cur_epos.bh) {
500 brelse(prev_epos.bh);
501 get_bh(cur_epos.bh);
502 prev_epos.bh = cur_epos.bh;
504 if (cur_epos.bh != next_epos.bh) {
505 brelse(cur_epos.bh);
506 get_bh(next_epos.bh);
507 cur_epos.bh = next_epos.bh;
510 lbcount += elen;
512 prev_epos.block = cur_epos.block;
513 cur_epos.block = next_epos.block;
515 prev_epos.offset = cur_epos.offset;
516 cur_epos.offset = next_epos.offset;
518 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
519 if (etype == -1)
520 break;
522 c = !c;
524 laarr[c].extLength = (etype << 30) | elen;
525 laarr[c].extLocation = eloc;
527 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
528 pgoal = eloc.logicalBlockNum +
529 ((elen + inode->i_sb->s_blocksize - 1) >>
530 inode->i_sb->s_blocksize_bits);
532 count++;
533 } while (lbcount + elen <= b_off);
535 b_off -= lbcount;
536 offset = b_off >> inode->i_sb->s_blocksize_bits;
538 * Move prev_epos and cur_epos into indirect extent if we are at
539 * the pointer to it
541 udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
542 udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
544 /* if the extent is allocated and recorded, return the block;
545 if the extent is not a multiple of the blocksize, round up */
547 if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
548 if (elen & (inode->i_sb->s_blocksize - 1)) {
549 elen = EXT_RECORDED_ALLOCATED |
550 ((elen + inode->i_sb->s_blocksize - 1) &
551 ~(inode->i_sb->s_blocksize - 1));
552 etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
554 brelse(prev_epos.bh);
555 brelse(cur_epos.bh);
556 brelse(next_epos.bh);
557 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
558 *phys = newblock;
559 return NULL;
562 last_block = block;
563 /* Are we beyond EOF? */
564 if (etype == -1) {
565 int ret;
567 if (count) {
568 if (c)
569 laarr[0] = laarr[1];
570 startnum = 1;
571 } else {
572 /* Create a fake extent when there's not one */
573 memset(&laarr[0].extLocation, 0x00,
574 sizeof(kernel_lb_addr));
575 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
576 /* Will udf_extend_file() create real extent from
577 a fake one? */
578 startnum = (offset > 0);
580 /* Create extents for the hole between EOF and offset */
581 ret = udf_extend_file(inode, &prev_epos, laarr, offset);
582 if (ret == -1) {
583 brelse(prev_epos.bh);
584 brelse(cur_epos.bh);
585 brelse(next_epos.bh);
586 /* We don't really know the error here so we just make
587 * something up */
588 *err = -ENOSPC;
589 return NULL;
591 c = 0;
592 offset = 0;
593 count += ret;
594 /* Are we not covered by a preallocated extent? */
595 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
596 EXT_NOT_RECORDED_ALLOCATED) {
597 /* Is there any real extent? - otherwise we overwrite
598 * the fake one... */
599 if (count)
600 c = !c;
601 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
602 inode->i_sb->s_blocksize;
603 memset(&laarr[c].extLocation, 0x00,
604 sizeof(kernel_lb_addr));
605 count++;
606 endnum++;
608 endnum = c + 1;
609 lastblock = 1;
610 } else {
611 endnum = startnum = ((count > 2) ? 2 : count);
613 /* if the current extent is in position 0,
614 swap it with the previous */
615 if (!c && count != 1) {
616 laarr[2] = laarr[0];
617 laarr[0] = laarr[1];
618 laarr[1] = laarr[2];
619 c = 1;
622 /* if the current block is located in an extent,
623 read the next extent */
624 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
625 if (etype != -1) {
626 laarr[c + 1].extLength = (etype << 30) | elen;
627 laarr[c + 1].extLocation = eloc;
628 count++;
629 startnum++;
630 endnum++;
631 } else
632 lastblock = 1;
635 /* if the current extent is not recorded but allocated, get the
636 * block in the extent corresponding to the requested block */
637 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
638 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
639 else { /* otherwise, allocate a new block */
640 if (iinfo->i_next_alloc_block == block)
641 goal = iinfo->i_next_alloc_goal;
643 if (!goal) {
644 if (!(goal = pgoal)) /* XXX: what was intended here? */
645 goal = iinfo->i_location.logicalBlockNum + 1;
648 newblocknum = udf_new_block(inode->i_sb, inode,
649 iinfo->i_location.partitionReferenceNum,
650 goal, err);
651 if (!newblocknum) {
652 brelse(prev_epos.bh);
653 *err = -ENOSPC;
654 return NULL;
656 iinfo->i_lenExtents += inode->i_sb->s_blocksize;
659 /* if the extent the requested block is located in contains multiple
660 * blocks, split the extent into at most three extents. blocks prior
661 * to requested block, requested block, and blocks after requested
662 * block */
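/*
 * For instance, if the requested block falls at offset 3 of an 8-block
 * unrecorded extent, the split below yields three pieces: blocks 0-2
 * stay unrecorded, block 3 becomes a one-block recorded extent backed by
 * newblocknum, and blocks 4-7 remain a trailing unrecorded extent.
 */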
663 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
665 #ifdef UDF_PREALLOCATE
666 /* preallocate blocks */
667 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
668 #endif
670 /* merge any continuous blocks in laarr */
671 udf_merge_extents(inode, laarr, &endnum);
673 /* write back the new extents, inserting new extents if the new number
674 * of extents is greater than the old number, and deleting extents if
675 * the new number of extents is less than the old number */
676 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
678 brelse(prev_epos.bh);
680 newblock = udf_get_pblock(inode->i_sb, newblocknum,
681 iinfo->i_location.partitionReferenceNum, 0);
682 if (!newblock)
683 return NULL;
684 *phys = newblock;
685 *err = 0;
686 *new = 1;
687 iinfo->i_next_alloc_block = block;
688 iinfo->i_next_alloc_goal = newblocknum;
689 inode->i_ctime = current_fs_time(inode->i_sb);
691 if (IS_SYNC(inode))
692 udf_sync_inode(inode);
693 else
694 mark_inode_dirty(inode);
696 return result;
699 static void udf_split_extents(struct inode *inode, int *c, int offset,
700 int newblocknum,
701 kernel_long_ad laarr[EXTENT_MERGE_SIZE],
702 int *endnum)
704 unsigned long blocksize = inode->i_sb->s_blocksize;
705 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
707 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
708 (laarr[*c].extLength >> 30) ==
709 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
710 int curr = *c;
711 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
712 blocksize - 1) >> blocksize_bits;
713 int8_t etype = (laarr[curr].extLength >> 30);
715 if (blen == 1)
717 else if (!offset || blen == offset + 1) {
718 laarr[curr + 2] = laarr[curr + 1];
719 laarr[curr + 1] = laarr[curr];
720 } else {
721 laarr[curr + 3] = laarr[curr + 1];
722 laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
725 if (offset) {
726 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
727 udf_free_blocks(inode->i_sb, inode,
728 laarr[curr].extLocation,
729 0, offset);
730 laarr[curr].extLength =
731 EXT_NOT_RECORDED_NOT_ALLOCATED |
732 (offset << blocksize_bits);
733 laarr[curr].extLocation.logicalBlockNum = 0;
734 laarr[curr].extLocation.
735 partitionReferenceNum = 0;
736 } else
737 laarr[curr].extLength = (etype << 30) |
738 (offset << blocksize_bits);
739 curr++;
740 (*c)++;
741 (*endnum)++;
744 laarr[curr].extLocation.logicalBlockNum = newblocknum;
745 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
746 laarr[curr].extLocation.partitionReferenceNum =
747 UDF_I(inode)->i_location.partitionReferenceNum;
748 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
749 blocksize;
750 curr++;
752 if (blen != offset + 1) {
753 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
754 laarr[curr].extLocation.logicalBlockNum +=
755 offset + 1;
756 laarr[curr].extLength = (etype << 30) |
757 ((blen - (offset + 1)) << blocksize_bits);
758 curr++;
759 (*endnum)++;
764 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
765 kernel_long_ad laarr[EXTENT_MERGE_SIZE],
766 int *endnum)
768 int start, length = 0, currlength = 0, i;
770 if (*endnum >= (c + 1)) {
771 if (!lastblock)
772 return;
773 else
774 start = c;
775 } else {
776 if ((laarr[c + 1].extLength >> 30) ==
777 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
778 start = c + 1;
779 length = currlength =
780 (((laarr[c + 1].extLength &
781 UDF_EXTENT_LENGTH_MASK) +
782 inode->i_sb->s_blocksize - 1) >>
783 inode->i_sb->s_blocksize_bits);
784 } else
785 start = c;
788 for (i = start + 1; i <= *endnum; i++) {
789 if (i == *endnum) {
790 if (lastblock)
791 length += UDF_DEFAULT_PREALLOC_BLOCKS;
792 } else if ((laarr[i].extLength >> 30) ==
793 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
794 length += (((laarr[i].extLength &
795 UDF_EXTENT_LENGTH_MASK) +
796 inode->i_sb->s_blocksize - 1) >>
797 inode->i_sb->s_blocksize_bits);
798 } else
799 break;
802 if (length) {
803 int next = laarr[start].extLocation.logicalBlockNum +
804 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
805 inode->i_sb->s_blocksize - 1) >>
806 inode->i_sb->s_blocksize_bits);
807 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
808 laarr[start].extLocation.partitionReferenceNum,
809 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
810 length : UDF_DEFAULT_PREALLOC_BLOCKS) -
811 currlength);
812 if (numalloc) {
813 if (start == (c + 1))
814 laarr[start].extLength +=
815 (numalloc <<
816 inode->i_sb->s_blocksize_bits);
817 else {
818 memmove(&laarr[c + 2], &laarr[c + 1],
819 sizeof(long_ad) * (*endnum - (c + 1)));
820 (*endnum)++;
821 laarr[c + 1].extLocation.logicalBlockNum = next;
822 laarr[c + 1].extLocation.partitionReferenceNum =
823 laarr[c].extLocation.
824 partitionReferenceNum;
825 laarr[c + 1].extLength =
826 EXT_NOT_RECORDED_ALLOCATED |
827 (numalloc <<
828 inode->i_sb->s_blocksize_bits);
829 start = c + 1;
832 for (i = start + 1; numalloc && i < *endnum; i++) {
833 int elen = ((laarr[i].extLength &
834 UDF_EXTENT_LENGTH_MASK) +
835 inode->i_sb->s_blocksize - 1) >>
836 inode->i_sb->s_blocksize_bits;
838 if (elen > numalloc) {
839 laarr[i].extLength -=
840 (numalloc <<
841 inode->i_sb->s_blocksize_bits);
842 numalloc = 0;
843 } else {
844 numalloc -= elen;
845 if (*endnum > (i + 1))
846 memmove(&laarr[i],
847 &laarr[i + 1],
848 sizeof(long_ad) *
849 (*endnum - (i + 1)));
850 i--;
851 (*endnum)--;
854 UDF_I(inode)->i_lenExtents +=
855 numalloc << inode->i_sb->s_blocksize_bits;
860 static void udf_merge_extents(struct inode *inode,
861 kernel_long_ad laarr[EXTENT_MERGE_SIZE],
862 int *endnum)
864 int i;
865 unsigned long blocksize = inode->i_sb->s_blocksize;
866 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
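/*
 * Example of what the loop below does: two adjacent descriptors of the
 * same type whose logical blocks are contiguous - say 10 blocks starting
 * at block 100 followed by 5 blocks starting at block 110 - are collapsed
 * into a single 15-block descriptor, as long as the combined length still
 * fits in the 30-bit extent length field.
 */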
868 for (i = 0; i < (*endnum - 1); i++) {
869 kernel_long_ad *li /*l[i]*/ = &laarr[i];
870 kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
872 if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
873 (((li->extLength >> 30) ==
874 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
875 ((lip1->extLocation.logicalBlockNum -
876 li->extLocation.logicalBlockNum) ==
877 (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
878 blocksize - 1) >> blocksize_bits)))) {
880 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
881 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
882 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
883 lip1->extLength = (lip1->extLength -
884 (li->extLength &
885 UDF_EXTENT_LENGTH_MASK) +
886 UDF_EXTENT_LENGTH_MASK) &
887 ~(blocksize - 1);
888 li->extLength = (li->extLength &
889 UDF_EXTENT_FLAG_MASK) +
890 (UDF_EXTENT_LENGTH_MASK + 1) -
891 blocksize;
892 lip1->extLocation.logicalBlockNum =
893 li->extLocation.logicalBlockNum +
894 ((li->extLength &
895 UDF_EXTENT_LENGTH_MASK) >>
896 blocksize_bits);
897 } else {
898 li->extLength = lip1->extLength +
899 (((li->extLength &
900 UDF_EXTENT_LENGTH_MASK) +
901 blocksize - 1) & ~(blocksize - 1));
902 if (*endnum > (i + 2))
903 memmove(&laarr[i + 1], &laarr[i + 2],
904 sizeof(long_ad) *
905 (*endnum - (i + 2)));
906 i--;
907 (*endnum)--;
909 } else if (((li->extLength >> 30) ==
910 (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
911 ((lip1->extLength >> 30) ==
912 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
913 udf_free_blocks(inode->i_sb, inode, li->extLocation, 0,
914 ((li->extLength &
915 UDF_EXTENT_LENGTH_MASK) +
916 blocksize - 1) >> blocksize_bits);
917 li->extLocation.logicalBlockNum = 0;
918 li->extLocation.partitionReferenceNum = 0;
920 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
921 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
922 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
923 lip1->extLength = (lip1->extLength -
924 (li->extLength &
925 UDF_EXTENT_LENGTH_MASK) +
926 UDF_EXTENT_LENGTH_MASK) &
927 ~(blocksize - 1);
928 li->extLength = (li->extLength &
929 UDF_EXTENT_FLAG_MASK) +
930 (UDF_EXTENT_LENGTH_MASK + 1) -
931 blocksize;
932 } else {
933 li->extLength = lip1->extLength +
934 (((li->extLength &
935 UDF_EXTENT_LENGTH_MASK) +
936 blocksize - 1) & ~(blocksize - 1));
937 if (*endnum > (i + 2))
938 memmove(&laarr[i + 1], &laarr[i + 2],
939 sizeof(long_ad) *
940 (*endnum - (i + 2)));
941 i--;
942 (*endnum)--;
944 } else if ((li->extLength >> 30) ==
945 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
946 udf_free_blocks(inode->i_sb, inode,
947 li->extLocation, 0,
948 ((li->extLength &
949 UDF_EXTENT_LENGTH_MASK) +
950 blocksize - 1) >> blocksize_bits);
951 li->extLocation.logicalBlockNum = 0;
952 li->extLocation.partitionReferenceNum = 0;
953 li->extLength = (li->extLength &
954 UDF_EXTENT_LENGTH_MASK) |
955 EXT_NOT_RECORDED_NOT_ALLOCATED;
960 static void udf_update_extents(struct inode *inode,
961 kernel_long_ad laarr[EXTENT_MERGE_SIZE],
962 int startnum, int endnum,
963 struct extent_position *epos)
965 int start = 0, i;
966 kernel_lb_addr tmploc;
967 uint32_t tmplen;
969 if (startnum > endnum) {
970 for (i = 0; i < (startnum - endnum); i++)
971 udf_delete_aext(inode, *epos, laarr[i].extLocation,
972 laarr[i].extLength);
973 } else if (startnum < endnum) {
974 for (i = 0; i < (endnum - startnum); i++) {
975 udf_insert_aext(inode, *epos, laarr[i].extLocation,
976 laarr[i].extLength);
977 udf_next_aext(inode, epos, &laarr[i].extLocation,
978 &laarr[i].extLength, 1);
979 start++;
983 for (i = start; i < endnum; i++) {
984 udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
985 udf_write_aext(inode, epos, laarr[i].extLocation,
986 laarr[i].extLength, 1);
990 struct buffer_head *udf_bread(struct inode *inode, int block,
991 int create, int *err)
993 struct buffer_head *bh = NULL;
995 bh = udf_getblk(inode, block, create, err);
996 if (!bh)
997 return NULL;
999 if (buffer_uptodate(bh))
1000 return bh;
1002 ll_rw_block(READ, 1, &bh);
1004 wait_on_buffer(bh);
1005 if (buffer_uptodate(bh))
1006 return bh;
1008 brelse(bh);
1009 *err = -EIO;
1010 return NULL;
1013 void udf_truncate(struct inode *inode)
1015 int offset;
1016 int err;
1017 struct udf_inode_info *iinfo;
1019 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1020 S_ISLNK(inode->i_mode)))
1021 return;
1022 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1023 return;
1025 lock_kernel();
1026 iinfo = UDF_I(inode);
1027 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1028 if (inode->i_sb->s_blocksize <
1029 (udf_file_entry_alloc_offset(inode) +
1030 inode->i_size)) {
1031 udf_expand_file_adinicb(inode, inode->i_size, &err);
1032 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1033 inode->i_size = iinfo->i_lenAlloc;
1034 unlock_kernel();
1035 return;
1036 } else
1037 udf_truncate_extents(inode);
1038 } else {
1039 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
1040 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
1041 0x00, inode->i_sb->s_blocksize -
1042 offset - udf_file_entry_alloc_offset(inode));
1043 iinfo->i_lenAlloc = inode->i_size;
1045 } else {
1046 block_truncate_page(inode->i_mapping, inode->i_size,
1047 udf_get_block);
1048 udf_truncate_extents(inode);
1051 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
1052 if (IS_SYNC(inode))
1053 udf_sync_inode(inode);
1054 else
1055 mark_inode_dirty(inode);
1056 unlock_kernel();
1059 static void __udf_read_inode(struct inode *inode)
1061 struct buffer_head *bh = NULL;
1062 struct fileEntry *fe;
1063 uint16_t ident;
1064 struct udf_inode_info *iinfo = UDF_I(inode);
1067 * Set defaults, but the inode is still incomplete!
1068 * Note: get_new_inode() sets the following on a new inode:
1069 * i_sb = sb
1070 * i_no = ino
1071 * i_flags = sb->s_flags
1072 * i_state = 0
1073 * clean_inode(): zero fills and sets
1074 * i_count = 1
1075 * i_nlink = 1
1076 * i_op = NULL;
1078 bh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 0, &ident);
1079 if (!bh) {
1080 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1081 inode->i_ino);
1082 make_bad_inode(inode);
1083 return;
1086 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1087 ident != TAG_IDENT_USE) {
1088 printk(KERN_ERR "udf: udf_read_inode(ino %ld) "
1089 "failed ident=%d\n", inode->i_ino, ident);
1090 brelse(bh);
1091 make_bad_inode(inode);
1092 return;
1095 fe = (struct fileEntry *)bh->b_data;
1097 if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1098 struct buffer_head *ibh;
1100 ibh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 1,
1101 &ident);
1102 if (ident == TAG_IDENT_IE && ibh) {
1103 struct buffer_head *nbh = NULL;
1104 kernel_lb_addr loc;
1105 struct indirectEntry *ie;
1107 ie = (struct indirectEntry *)ibh->b_data;
1108 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1110 if (ie->indirectICB.extLength &&
1111 (nbh = udf_read_ptagged(inode->i_sb, loc, 0,
1112 &ident))) {
1113 if (ident == TAG_IDENT_FE ||
1114 ident == TAG_IDENT_EFE) {
1115 memcpy(&iinfo->i_location,
1116 &loc,
1117 sizeof(kernel_lb_addr));
1118 brelse(bh);
1119 brelse(ibh);
1120 brelse(nbh);
1121 __udf_read_inode(inode);
1122 return;
1124 brelse(nbh);
1127 brelse(ibh);
1128 } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1129 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1130 le16_to_cpu(fe->icbTag.strategyType));
1131 brelse(bh);
1132 make_bad_inode(inode);
1133 return;
1135 udf_fill_inode(inode, bh);
1137 brelse(bh);
1140 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1142 struct fileEntry *fe;
1143 struct extendedFileEntry *efe;
1144 int offset;
1145 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1146 struct udf_inode_info *iinfo = UDF_I(inode);
1148 fe = (struct fileEntry *)bh->b_data;
1149 efe = (struct extendedFileEntry *)bh->b_data;
1151 if (fe->icbTag.strategyType == cpu_to_le16(4))
1152 iinfo->i_strat4096 = 0;
1153 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1154 iinfo->i_strat4096 = 1;
1156 iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
1157 ICBTAG_FLAG_AD_MASK;
1158 iinfo->i_unique = 0;
1159 iinfo->i_lenEAttr = 0;
1160 iinfo->i_lenExtents = 0;
1161 iinfo->i_lenAlloc = 0;
1162 iinfo->i_next_alloc_block = 0;
1163 iinfo->i_next_alloc_goal = 0;
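/*
 * Everything after the fixed (E)FE header is cached verbatim in
 * iinfo->i_ext.i_data below.  Roughly, that buffer holds
 *
 *   [ extended attributes, i_lenEAttr bytes ]
 *   [ allocation descriptors - or, for ICBTAG_FLAG_AD_IN_ICB inodes,
 *     the file data itself - i_lenAlloc bytes ]
 *
 * which is why later code addresses it as i_ext.i_data + i_lenEAttr.
 */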
1164 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1165 iinfo->i_efe = 1;
1166 iinfo->i_use = 0;
1167 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1168 sizeof(struct extendedFileEntry))) {
1169 make_bad_inode(inode);
1170 return;
1172 memcpy(iinfo->i_ext.i_data,
1173 bh->b_data + sizeof(struct extendedFileEntry),
1174 inode->i_sb->s_blocksize -
1175 sizeof(struct extendedFileEntry));
1176 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1177 iinfo->i_efe = 0;
1178 iinfo->i_use = 0;
1179 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1180 sizeof(struct fileEntry))) {
1181 make_bad_inode(inode);
1182 return;
1184 memcpy(iinfo->i_ext.i_data,
1185 bh->b_data + sizeof(struct fileEntry),
1186 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1187 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1188 iinfo->i_efe = 0;
1189 iinfo->i_use = 1;
1190 iinfo->i_lenAlloc = le32_to_cpu(
1191 ((struct unallocSpaceEntry *)bh->b_data)->
1192 lengthAllocDescs);
1193 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1194 sizeof(struct unallocSpaceEntry))) {
1195 make_bad_inode(inode);
1196 return;
1198 memcpy(iinfo->i_ext.i_data,
1199 bh->b_data + sizeof(struct unallocSpaceEntry),
1200 inode->i_sb->s_blocksize -
1201 sizeof(struct unallocSpaceEntry));
1202 return;
1205 inode->i_uid = le32_to_cpu(fe->uid);
1206 if (inode->i_uid == -1 ||
1207 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
1208 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
1209 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1211 inode->i_gid = le32_to_cpu(fe->gid);
1212 if (inode->i_gid == -1 ||
1213 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
1214 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
1215 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1217 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1218 if (!inode->i_nlink)
1219 inode->i_nlink = 1;
1221 inode->i_size = le64_to_cpu(fe->informationLength);
1222 iinfo->i_lenExtents = inode->i_size;
1224 inode->i_mode = udf_convert_permissions(fe);
1225 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1227 if (iinfo->i_efe == 0) {
1228 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1229 (inode->i_sb->s_blocksize_bits - 9);
1231 if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
1232 inode->i_atime = sbi->s_record_time;
1234 if (!udf_disk_stamp_to_time(&inode->i_mtime,
1235 fe->modificationTime))
1236 inode->i_mtime = sbi->s_record_time;
1238 if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
1239 inode->i_ctime = sbi->s_record_time;
1241 iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1242 iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
1243 iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
1244 offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr;
1245 } else {
1246 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1247 (inode->i_sb->s_blocksize_bits - 9);
1249 if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
1250 inode->i_atime = sbi->s_record_time;
1252 if (!udf_disk_stamp_to_time(&inode->i_mtime,
1253 efe->modificationTime))
1254 inode->i_mtime = sbi->s_record_time;
1256 if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
1257 iinfo->i_crtime = sbi->s_record_time;
1259 if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
1260 inode->i_ctime = sbi->s_record_time;
1262 iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1263 iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
1264 iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
1265 offset = sizeof(struct extendedFileEntry) +
1266 iinfo->i_lenEAttr;
1269 switch (fe->icbTag.fileType) {
1270 case ICBTAG_FILE_TYPE_DIRECTORY:
1271 inode->i_op = &udf_dir_inode_operations;
1272 inode->i_fop = &udf_dir_operations;
1273 inode->i_mode |= S_IFDIR;
1274 inc_nlink(inode);
1275 break;
1276 case ICBTAG_FILE_TYPE_REALTIME:
1277 case ICBTAG_FILE_TYPE_REGULAR:
1278 case ICBTAG_FILE_TYPE_UNDEF:
1279 case ICBTAG_FILE_TYPE_VAT20:
1280 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1281 inode->i_data.a_ops = &udf_adinicb_aops;
1282 else
1283 inode->i_data.a_ops = &udf_aops;
1284 inode->i_op = &udf_file_inode_operations;
1285 inode->i_fop = &udf_file_operations;
1286 inode->i_mode |= S_IFREG;
1287 break;
1288 case ICBTAG_FILE_TYPE_BLOCK:
1289 inode->i_mode |= S_IFBLK;
1290 break;
1291 case ICBTAG_FILE_TYPE_CHAR:
1292 inode->i_mode |= S_IFCHR;
1293 break;
1294 case ICBTAG_FILE_TYPE_FIFO:
1295 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1296 break;
1297 case ICBTAG_FILE_TYPE_SOCKET:
1298 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1299 break;
1300 case ICBTAG_FILE_TYPE_SYMLINK:
1301 inode->i_data.a_ops = &udf_symlink_aops;
1302 inode->i_op = &page_symlink_inode_operations;
1303 inode->i_mode = S_IFLNK | S_IRWXUGO;
1304 break;
1305 case ICBTAG_FILE_TYPE_MAIN:
1306 udf_debug("METADATA FILE-----\n");
1307 break;
1308 case ICBTAG_FILE_TYPE_MIRROR:
1309 udf_debug("METADATA MIRROR FILE-----\n");
1310 break;
1311 case ICBTAG_FILE_TYPE_BITMAP:
1312 udf_debug("METADATA BITMAP FILE-----\n");
1313 break;
1314 default:
1315 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown "
1316 "file type=%d\n", inode->i_ino,
1317 fe->icbTag.fileType);
1318 make_bad_inode(inode);
1319 return;
1321 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1322 struct deviceSpec *dsea =
1323 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1324 if (dsea) {
1325 init_special_inode(inode, inode->i_mode,
1326 MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1327 le32_to_cpu(dsea->minorDeviceIdent)));
1328 /* Developer ID ??? */
1329 } else
1330 make_bad_inode(inode);
1334 static int udf_alloc_i_data(struct inode *inode, size_t size)
1336 struct udf_inode_info *iinfo = UDF_I(inode);
1337 iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
1339 if (!iinfo->i_ext.i_data) {
1340 printk(KERN_ERR "udf: udf_alloc_i_data (ino %ld) "
1341 "no free memory\n", inode->i_ino);
1342 return -ENOMEM;
1345 return 0;
1348 static mode_t udf_convert_permissions(struct fileEntry *fe)
1350 mode_t mode;
1351 uint32_t permissions;
1352 uint32_t flags;
1354 permissions = le32_to_cpu(fe->permissions);
1355 flags = le16_to_cpu(fe->icbTag.flags);
1357 mode = ((permissions) & S_IRWXO) |
1358 ((permissions >> 2) & S_IRWXG) |
1359 ((permissions >> 4) & S_IRWXU) |
1360 ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1361 ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1362 ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1364 return mode;
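/*
 * Worked example: the on-disk permission word keeps five bits per class
 * (other, group, owner), so UDF permissions 0x1CE7 - rwx for owner, group
 * and other - map to mode 0777 above.  udf_update_inode() performs the
 * inverse shift when writing the inode back, preserving the extra UDF
 * "chattr" and "delete" bits that have no POSIX counterpart.
 */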
1367 int udf_write_inode(struct inode *inode, int sync)
1369 int ret;
1371 lock_kernel();
1372 ret = udf_update_inode(inode, sync);
1373 unlock_kernel();
1375 return ret;
1378 int udf_sync_inode(struct inode *inode)
1380 return udf_update_inode(inode, 1);
1383 static int udf_update_inode(struct inode *inode, int do_sync)
1385 struct buffer_head *bh = NULL;
1386 struct fileEntry *fe;
1387 struct extendedFileEntry *efe;
1388 uint32_t udfperms;
1389 uint16_t icbflags;
1390 uint16_t crclen;
1391 int err = 0;
1392 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1393 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1394 struct udf_inode_info *iinfo = UDF_I(inode);
1396 bh = udf_tread(inode->i_sb,
1397 udf_get_lb_pblock(inode->i_sb,
1398 iinfo->i_location, 0));
1399 if (!bh) {
1400 udf_debug("bread failure\n");
1401 return -EIO;
1404 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1406 fe = (struct fileEntry *)bh->b_data;
1407 efe = (struct extendedFileEntry *)bh->b_data;
1409 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1410 struct unallocSpaceEntry *use =
1411 (struct unallocSpaceEntry *)bh->b_data;
1413 use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1414 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
1415 iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1416 sizeof(struct unallocSpaceEntry));
1417 crclen = sizeof(struct unallocSpaceEntry) +
1418 iinfo->i_lenAlloc - sizeof(tag);
1419 use->descTag.tagLocation = cpu_to_le32(
1420 iinfo->i_location.
1421 logicalBlockNum);
1422 use->descTag.descCRCLength = cpu_to_le16(crclen);
1423 use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
1424 sizeof(tag),
1425 crclen));
1426 use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
1428 mark_buffer_dirty(bh);
1429 brelse(bh);
1430 return err;
1433 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1434 fe->uid = cpu_to_le32(-1);
1435 else
1436 fe->uid = cpu_to_le32(inode->i_uid);
1438 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1439 fe->gid = cpu_to_le32(-1);
1440 else
1441 fe->gid = cpu_to_le32(inode->i_gid);
1443 udfperms = ((inode->i_mode & S_IRWXO)) |
1444 ((inode->i_mode & S_IRWXG) << 2) |
1445 ((inode->i_mode & S_IRWXU) << 4);
1447 udfperms |= (le32_to_cpu(fe->permissions) &
1448 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1449 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1450 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1451 fe->permissions = cpu_to_le32(udfperms);
1453 if (S_ISDIR(inode->i_mode))
1454 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1455 else
1456 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1458 fe->informationLength = cpu_to_le64(inode->i_size);
1460 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1461 regid *eid;
1462 struct deviceSpec *dsea =
1463 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1464 if (!dsea) {
1465 dsea = (struct deviceSpec *)
1466 udf_add_extendedattr(inode,
1467 sizeof(struct deviceSpec) +
1468 sizeof(regid), 12, 0x3);
1469 dsea->attrType = cpu_to_le32(12);
1470 dsea->attrSubtype = 1;
1471 dsea->attrLength = cpu_to_le32(
1472 sizeof(struct deviceSpec) +
1473 sizeof(regid));
1474 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1476 eid = (regid *)dsea->impUse;
1477 memset(eid, 0, sizeof(regid));
1478 strcpy(eid->ident, UDF_ID_DEVELOPER);
1479 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1480 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1481 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1482 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1485 if (iinfo->i_efe == 0) {
1486 memcpy(bh->b_data + sizeof(struct fileEntry),
1487 iinfo->i_ext.i_data,
1488 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1489 fe->logicalBlocksRecorded = cpu_to_le64(
1490 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1491 (blocksize_bits - 9));
1493 udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
1494 udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
1495 udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
1496 memset(&(fe->impIdent), 0, sizeof(regid));
1497 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1498 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1499 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1500 fe->uniqueID = cpu_to_le64(iinfo->i_unique);
1501 fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1502 fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1503 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1504 crclen = sizeof(struct fileEntry);
1505 } else {
1506 memcpy(bh->b_data + sizeof(struct extendedFileEntry),
1507 iinfo->i_ext.i_data,
1508 inode->i_sb->s_blocksize -
1509 sizeof(struct extendedFileEntry));
1510 efe->objectSize = cpu_to_le64(inode->i_size);
1511 efe->logicalBlocksRecorded = cpu_to_le64(
1512 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1513 (blocksize_bits - 9));
1515 if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
1516 (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
1517 iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec))
1518 iinfo->i_crtime = inode->i_atime;
1520 if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec ||
1521 (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec &&
1522 iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec))
1523 iinfo->i_crtime = inode->i_mtime;
1525 if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec ||
1526 (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec &&
1527 iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
1528 iinfo->i_crtime = inode->i_ctime;
1530 udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
1531 udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
1532 udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
1533 udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
1535 memset(&(efe->impIdent), 0, sizeof(regid));
1536 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1537 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1538 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1539 efe->uniqueID = cpu_to_le64(iinfo->i_unique);
1540 efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1541 efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1542 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1543 crclen = sizeof(struct extendedFileEntry);
1545 if (iinfo->i_strat4096) {
1546 fe->icbTag.strategyType = cpu_to_le16(4096);
1547 fe->icbTag.strategyParameter = cpu_to_le16(1);
1548 fe->icbTag.numEntries = cpu_to_le16(2);
1549 } else {
1550 fe->icbTag.strategyType = cpu_to_le16(4);
1551 fe->icbTag.numEntries = cpu_to_le16(1);
1554 if (S_ISDIR(inode->i_mode))
1555 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1556 else if (S_ISREG(inode->i_mode))
1557 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1558 else if (S_ISLNK(inode->i_mode))
1559 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1560 else if (S_ISBLK(inode->i_mode))
1561 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1562 else if (S_ISCHR(inode->i_mode))
1563 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1564 else if (S_ISFIFO(inode->i_mode))
1565 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1566 else if (S_ISSOCK(inode->i_mode))
1567 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1569 icbflags = iinfo->i_alloc_type |
1570 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1571 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1572 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1573 (le16_to_cpu(fe->icbTag.flags) &
1574 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1575 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1577 fe->icbTag.flags = cpu_to_le16(icbflags);
1578 if (sbi->s_udfrev >= 0x0200)
1579 fe->descTag.descVersion = cpu_to_le16(3);
1580 else
1581 fe->descTag.descVersion = cpu_to_le16(2);
1582 fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
1583 fe->descTag.tagLocation = cpu_to_le32(
1584 iinfo->i_location.logicalBlockNum);
1585 crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc -
1586 sizeof(tag);
1587 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1588 fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(tag),
1589 crclen));
1590 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
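/*
 * Rough check of the CRC bookkeeping above: the descriptor CRC covers
 * everything after the 16-byte tag, so for a plain file entry with no
 * extended attributes and a single long_ad this is
 * sizeof(struct fileEntry) (176) - sizeof(tag) (16) + 16 = 176 bytes,
 * which is the value stored in descCRCLength.
 */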
1592 /* write the data blocks */
1593 mark_buffer_dirty(bh);
1594 if (do_sync) {
1595 sync_dirty_buffer(bh);
1596 if (buffer_req(bh) && !buffer_uptodate(bh)) {
1597 printk(KERN_WARNING "IO error syncing udf inode "
1598 "[%s:%08lx]\n", inode->i_sb->s_id,
1599 inode->i_ino);
1600 err = -EIO;
1603 brelse(bh);
1605 return err;
1608 struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
1610 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1611 struct inode *inode = iget_locked(sb, block);
1613 if (!inode)
1614 return NULL;
1616 if (inode->i_state & I_NEW) {
1617 memcpy(&UDF_I(inode)->i_location, &ino, sizeof(kernel_lb_addr));
1618 __udf_read_inode(inode);
1619 unlock_new_inode(inode);
1622 if (is_bad_inode(inode))
1623 goto out_iput;
1625 if (ino.logicalBlockNum >= UDF_SB(sb)->
1626 s_partmaps[ino.partitionReferenceNum].s_partition_len) {
1627 udf_debug("block=%d, partition=%d out of range\n",
1628 ino.logicalBlockNum, ino.partitionReferenceNum);
1629 make_bad_inode(inode);
1630 goto out_iput;
1633 return inode;
1635 out_iput:
1636 iput(inode);
1637 return NULL;
1640 int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1641 kernel_lb_addr eloc, uint32_t elen, int inc)
1643 int adsize;
1644 short_ad *sad = NULL;
1645 long_ad *lad = NULL;
1646 struct allocExtDesc *aed;
1647 int8_t etype;
1648 uint8_t *ptr;
1649 struct udf_inode_info *iinfo = UDF_I(inode);
1651 if (!epos->bh)
1652 ptr = iinfo->i_ext.i_data + epos->offset -
1653 udf_file_entry_alloc_offset(inode) +
1654 iinfo->i_lenEAttr;
1655 else
1656 ptr = epos->bh->b_data + epos->offset;
1658 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1659 adsize = sizeof(short_ad);
1660 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1661 adsize = sizeof(long_ad);
1662 else
1663 return -1;
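/*
 * Room check: adding an entry must leave space for one more descriptor
 * that can later act as a continuation pointer.  When that no longer
 * holds, the code below allocates a fresh allocation-extent block, moves
 * the last descriptor there if necessary, and records an
 * EXT_NEXT_EXTENT_ALLOCDECS extent in the old area pointing at the new
 * block before continuing from epos inside it.
 */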
1665 if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
1666 char *sptr, *dptr;
1667 struct buffer_head *nbh;
1668 int err, loffset;
1669 kernel_lb_addr obloc = epos->block;
1671 epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1672 obloc.partitionReferenceNum,
1673 obloc.logicalBlockNum, &err);
1674 if (!epos->block.logicalBlockNum)
1675 return -1;
1676 nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1677 epos->block,
1678 0));
1679 if (!nbh)
1680 return -1;
1681 lock_buffer(nbh);
1682 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1683 set_buffer_uptodate(nbh);
1684 unlock_buffer(nbh);
1685 mark_buffer_dirty_inode(nbh, inode);
1687 aed = (struct allocExtDesc *)(nbh->b_data);
1688 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1689 aed->previousAllocExtLocation =
1690 cpu_to_le32(obloc.logicalBlockNum);
1691 if (epos->offset + adsize > inode->i_sb->s_blocksize) {
1692 loffset = epos->offset;
1693 aed->lengthAllocDescs = cpu_to_le32(adsize);
1694 sptr = ptr - adsize;
1695 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1696 memcpy(dptr, sptr, adsize);
1697 epos->offset = sizeof(struct allocExtDesc) + adsize;
1698 } else {
1699 loffset = epos->offset + adsize;
1700 aed->lengthAllocDescs = cpu_to_le32(0);
1701 sptr = ptr;
1702 epos->offset = sizeof(struct allocExtDesc);
1704 if (epos->bh) {
1705 aed = (struct allocExtDesc *)epos->bh->b_data;
1706 le32_add_cpu(&aed->lengthAllocDescs, adsize);
1707 } else {
1708 iinfo->i_lenAlloc += adsize;
1709 mark_inode_dirty(inode);
1712 if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
1713 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1714 epos->block.logicalBlockNum, sizeof(tag));
1715 else
1716 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1717 epos->block.logicalBlockNum, sizeof(tag));
1718 switch (iinfo->i_alloc_type) {
1719 case ICBTAG_FLAG_AD_SHORT:
1720 sad = (short_ad *)sptr;
1721 sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1722 inode->i_sb->s_blocksize);
1723 sad->extPosition =
1724 cpu_to_le32(epos->block.logicalBlockNum);
1725 break;
1726 case ICBTAG_FLAG_AD_LONG:
1727 lad = (long_ad *)sptr;
1728 lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1729 inode->i_sb->s_blocksize);
1730 lad->extLocation = cpu_to_lelb(epos->block);
1731 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1732 break;
1734 if (epos->bh) {
1735 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1736 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1737 udf_update_tag(epos->bh->b_data, loffset);
1738 else
1739 udf_update_tag(epos->bh->b_data,
1740 sizeof(struct allocExtDesc));
1741 mark_buffer_dirty_inode(epos->bh, inode);
1742 brelse(epos->bh);
1743 } else {
1744 mark_inode_dirty(inode);
1746 epos->bh = nbh;
1749 etype = udf_write_aext(inode, epos, eloc, elen, inc);
1751 if (!epos->bh) {
1752 iinfo->i_lenAlloc += adsize;
1753 mark_inode_dirty(inode);
1754 } else {
1755 aed = (struct allocExtDesc *)epos->bh->b_data;
1756 le32_add_cpu(&aed->lengthAllocDescs, adsize);
1757 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1758 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1759 udf_update_tag(epos->bh->b_data,
1760 epos->offset + (inc ? 0 : adsize));
1761 else
1762 udf_update_tag(epos->bh->b_data,
1763 sizeof(struct allocExtDesc));
1764 mark_buffer_dirty_inode(epos->bh, inode);
1767 return etype;
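/*
 * On disk a short_ad is 8 bytes (a 32-bit length+type word and a 32-bit
 * block number within the inode's own partition) while a long_ad is 16
 * bytes (the same length word, a 6-byte lb_addr naming block and
 * partition, and 6 bytes of implementation use), which is what the
 * adsize arithmetic in the helpers above and below relies on.
 */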
1770 int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1771 kernel_lb_addr eloc, uint32_t elen, int inc)
1773 int adsize;
1774 uint8_t *ptr;
1775 short_ad *sad;
1776 long_ad *lad;
1777 struct udf_inode_info *iinfo = UDF_I(inode);
1779 if (!epos->bh)
1780 ptr = iinfo->i_ext.i_data + epos->offset -
1781 udf_file_entry_alloc_offset(inode) +
1782 iinfo->i_lenEAttr;
1783 else
1784 ptr = epos->bh->b_data + epos->offset;
1786 switch (iinfo->i_alloc_type) {
1787 case ICBTAG_FLAG_AD_SHORT:
1788 sad = (short_ad *)ptr;
1789 sad->extLength = cpu_to_le32(elen);
1790 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1791 adsize = sizeof(short_ad);
1792 break;
1793 case ICBTAG_FLAG_AD_LONG:
1794 lad = (long_ad *)ptr;
1795 lad->extLength = cpu_to_le32(elen);
1796 lad->extLocation = cpu_to_lelb(eloc);
1797 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1798 adsize = sizeof(long_ad);
1799 break;
1800 default:
1801 return -1;
1804 if (epos->bh) {
1805 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1806 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
1807 struct allocExtDesc *aed =
1808 (struct allocExtDesc *)epos->bh->b_data;
1809 udf_update_tag(epos->bh->b_data,
1810 le32_to_cpu(aed->lengthAllocDescs) +
1811 sizeof(struct allocExtDesc));
1813 mark_buffer_dirty_inode(epos->bh, inode);
1814 } else {
1815 mark_inode_dirty(inode);
1818 if (inc)
1819 epos->offset += adsize;
1821 return (elen >> 30);
1824 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1825 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1827 int8_t etype;
1829 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1830 (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
1831 int block;
1832 epos->block = *eloc;
1833 epos->offset = sizeof(struct allocExtDesc);
1834 brelse(epos->bh);
1835 block = udf_get_lb_pblock(inode->i_sb, epos->block, 0);
1836 epos->bh = udf_tread(inode->i_sb, block);
1837 if (!epos->bh) {
1838 udf_debug("reading block %d failed!\n", block);
1839 return -1;
1843 return etype;
1846 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1847 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1849 int alen;
1850 int8_t etype;
1851 uint8_t *ptr;
1852 short_ad *sad;
1853 long_ad *lad;
1854 struct udf_inode_info *iinfo = UDF_I(inode);
1856 if (!epos->bh) {
1857 if (!epos->offset)
1858 epos->offset = udf_file_entry_alloc_offset(inode);
1859 ptr = iinfo->i_ext.i_data + epos->offset -
1860 udf_file_entry_alloc_offset(inode) +
1861 iinfo->i_lenEAttr;
1862 alen = udf_file_entry_alloc_offset(inode) +
1863 iinfo->i_lenAlloc;
1864 } else {
1865 if (!epos->offset)
1866 epos->offset = sizeof(struct allocExtDesc);
1867 ptr = epos->bh->b_data + epos->offset;
1868 alen = sizeof(struct allocExtDesc) +
1869 le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
1870 lengthAllocDescs);
1873 switch (iinfo->i_alloc_type) {
1874 case ICBTAG_FLAG_AD_SHORT:
1875 sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
1876 if (!sad)
1877 return -1;
1878 etype = le32_to_cpu(sad->extLength) >> 30;
1879 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1880 eloc->partitionReferenceNum =
1881 iinfo->i_location.partitionReferenceNum;
1882 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1883 break;
1884 case ICBTAG_FLAG_AD_LONG:
1885 lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
1886 if (!lad)
1887 return -1;
1888 etype = le32_to_cpu(lad->extLength) >> 30;
1889 *eloc = lelb_to_cpu(lad->extLocation);
1890 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1891 break;
1892 default:
1893 udf_debug("alloc_type = %d unsupported\n",
1894 iinfo->i_alloc_type);
1895 return -1;
1898 return etype;
1901 static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
1902 kernel_lb_addr neloc, uint32_t nelen)
1904 kernel_lb_addr oeloc;
1905 uint32_t oelen;
1906 int8_t etype;
1908 if (epos.bh)
1909 get_bh(epos.bh);
1911 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
1912 udf_write_aext(inode, &epos, neloc, nelen, 1);
1913 neloc = oeloc;
1914 nelen = (etype << 30) | oelen;
1916 udf_add_aext(inode, &epos, neloc, nelen, 1);
1917 brelse(epos.bh);
1919 return (nelen >> 30);
1922 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
1923 kernel_lb_addr eloc, uint32_t elen)
1925 struct extent_position oepos;
1926 int adsize;
1927 int8_t etype;
1928 struct allocExtDesc *aed;
1929 struct udf_inode_info *iinfo;
1931 if (epos.bh) {
1932 get_bh(epos.bh);
1933 get_bh(epos.bh);
1936 iinfo = UDF_I(inode);
1937 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1938 adsize = sizeof(short_ad);
1939 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1940 adsize = sizeof(long_ad);
1941 else
1942 adsize = 0;
1944 oepos = epos;
1945 if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
1946 return -1;
1948 while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
1949 udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
1950 if (oepos.bh != epos.bh) {
1951 oepos.block = epos.block;
1952 brelse(oepos.bh);
1953 get_bh(epos.bh);
1954 oepos.bh = epos.bh;
1955 oepos.offset = epos.offset - adsize;
1958 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
1959 elen = 0;
1961 if (epos.bh != oepos.bh) {
1962 udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
1963 udf_write_aext(inode, &oepos, eloc, elen, 1);
1964 udf_write_aext(inode, &oepos, eloc, elen, 1);
1965 if (!oepos.bh) {
1966 iinfo->i_lenAlloc -= (adsize * 2);
1967 mark_inode_dirty(inode);
1968 } else {
1969 aed = (struct allocExtDesc *)oepos.bh->b_data;
1970 le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
1971 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1972 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1973 udf_update_tag(oepos.bh->b_data,
1974 oepos.offset - (2 * adsize));
1975 else
1976 udf_update_tag(oepos.bh->b_data,
1977 sizeof(struct allocExtDesc));
1978 mark_buffer_dirty_inode(oepos.bh, inode);
1980 } else {
1981 udf_write_aext(inode, &oepos, eloc, elen, 1);
1982 if (!oepos.bh) {
1983 iinfo->i_lenAlloc -= adsize;
1984 mark_inode_dirty(inode);
1985 } else {
1986 aed = (struct allocExtDesc *)oepos.bh->b_data;
1987 le32_add_cpu(&aed->lengthAllocDescs, -adsize);
1988 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1989 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1990 udf_update_tag(oepos.bh->b_data,
1991 epos.offset - adsize);
1992 else
1993 udf_update_tag(oepos.bh->b_data,
1994 sizeof(struct allocExtDesc));
1995 mark_buffer_dirty_inode(oepos.bh, inode);
1999 brelse(epos.bh);
2000 brelse(oepos.bh);
2002 return (elen >> 30);
2005 int8_t inode_bmap(struct inode *inode, sector_t block,
2006 struct extent_position *pos, kernel_lb_addr *eloc,
2007 uint32_t *elen, sector_t *offset)
2009 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
2010 loff_t lbcount = 0, bcount =
2011 (loff_t) block << blocksize_bits;
2012 int8_t etype;
2013 struct udf_inode_info *iinfo;
2015 iinfo = UDF_I(inode);
2016 pos->offset = 0;
2017 pos->block = iinfo->i_location;
2018 pos->bh = NULL;
2019 *elen = 0;
2021 do {
2022 etype = udf_next_aext(inode, pos, eloc, elen, 1);
2023 if (etype == -1) {
2024 *offset = (bcount - lbcount) >> blocksize_bits;
2025 iinfo->i_lenExtents = lbcount;
2026 return -1;
2028 lbcount += *elen;
2029 } while (lbcount <= bcount);
2031 *offset = (bcount + *elen - lbcount) >> blocksize_bits;
2033 return etype;
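/*
 * Example of the mapping performed here: if file block 5 lands three
 * blocks into an extent that starts at logical block 100 of its
 * partition, inode_bmap() returns that extent's (eloc, elen) with
 * *offset == 3, and udf_block_map() below turns it into a physical block
 * via udf_get_lb_pblock(sb, eloc, 3), i.e. logical block 103 mapped
 * through the partition.
 */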
2036 long udf_block_map(struct inode *inode, sector_t block)
2038 kernel_lb_addr eloc;
2039 uint32_t elen;
2040 sector_t offset;
2041 struct extent_position epos = {};
2042 int ret;
2044 lock_kernel();
2046 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
2047 (EXT_RECORDED_ALLOCATED >> 30))
2048 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
2049 else
2050 ret = 0;
2052 unlock_kernel();
2053 brelse(epos.bh);
2055 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2056 return udf_fixed_to_variable(ret);
2057 else
2058 return ret;