fs/udf/inode.c
/*
 * inode.c
 *
 * PURPOSE
 *  Inode handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  This file is distributed under the terms of the GNU General Public
 *  License (GPL). Copies of the GPL can be obtained from:
 *    ftp://prep.ai.mit.edu/pub/gnu/GPL
 *  Each contributing author retains all rights to their own work.
 *
 *  (C) 1998 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/04/98 dgb  Added rudimentary directory functions
 *  10/07/98      Fully working udf_block_map! It works!
 *  11/25/98      bmap altered to better support extents
 *  12/06/98 blf  partition support in udf_iget, udf_block_map
 *                and udf_read_inode
 *  12/12/98      rewrote udf_block_map to handle next extents and descs across
 *                block boundaries (which is not actually allowed)
 *  12/20/98      added support for strategy 4096
 *  03/07/99      rewrote udf_block_map (again)
 *                New funcs, inode_bmap, udf_next_aext
 *  04/19/99      Support for writing device EA's for major/minor #
 */
#include "udfdecl.h"
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/crc-itu-t.h>

#include "udf_i.h"
#include "udf_sb.h"

MODULE_AUTHOR("Ben Fennema");
MODULE_DESCRIPTION("Universal Disk Format Filesystem");
MODULE_LICENSE("GPL");

#define EXTENT_MERGE_SIZE 5
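
/*
 * EXTENT_MERGE_SIZE bounds the on-stack kernel_long_ad scratch array
 * (laarr[]) that inode_getblk() and its split/prealloc/merge helpers use
 * while rewriting the extents around the block being mapped.
 */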

static mode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static void udf_fill_inode(struct inode *, struct buffer_head *);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
					sector_t *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
			      struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, int,
			      struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_prealloc_extents(struct inode *, int, int,
				 struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_merge_extents(struct inode *,
			      struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_update_extents(struct inode *,
			       struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
			       struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
void udf_evict_inode(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int want_delete = 0;

	truncate_inode_pages(&inode->i_data, 0);

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		inode->i_size = 0;
		udf_truncate(inode);
		lock_kernel();
		udf_update_inode(inode, IS_SYNC(inode));
		unlock_kernel();
	}
	invalidate_inode_buffers(inode);
	end_writeback(inode);
	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
	    inode->i_size != iinfo->i_lenExtents) {
		printk(KERN_WARNING "UDF-fs (%s): Inode %lu (mode %o) has "
			"inode size %llu different from extent length %llu. "
			"Filesystem need not be standards compliant.\n",
			inode->i_sb->s_id, inode->i_ino, inode->i_mode,
			(unsigned long long)inode->i_size,
			(unsigned long long)iinfo->i_lenExtents);
	}
	kfree(iinfo->i_ext.i_data);
	iinfo->i_ext.i_data = NULL;
	if (want_delete) {
		lock_kernel();
		udf_free_inode(inode);
		unlock_kernel();
	}
}
static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, udf_get_block, wbc);
}

static int udf_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, udf_get_block);
}

static int udf_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
	if (unlikely(ret)) {
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);
	}

	return ret;
}

static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, udf_get_block);
}

const struct address_space_operations udf_aops = {
	.readpage	= udf_readpage,
	.writepage	= udf_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= udf_write_begin,
	.write_end	= generic_write_end,
	.bmap		= udf_bmap,
};
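
/*
 * Convert a file that keeps its data inside the ICB (ICBTAG_FLAG_AD_IN_ICB)
 * into a regular extent-based file: copy the embedded data into page 0 of
 * the page cache, clear the in-ICB copy, switch the allocation type to
 * short/long ADs and write the page out through the normal aops.
 */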
void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
{
	struct page *page;
	char *kaddr;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct writeback_control udf_wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
	};

	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;

	if (!iinfo->i_lenAlloc) {
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
		else
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
		mark_inode_dirty(inode);
		return;
	}

	page = grab_cache_page(inode->i_mapping, 0);
	BUG_ON(!PageLocked(page));

	if (!PageUptodate(page)) {
		kaddr = kmap(page);
		memset(kaddr + iinfo->i_lenAlloc, 0x00,
		       PAGE_CACHE_SIZE - iinfo->i_lenAlloc);
		memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
		       iinfo->i_lenAlloc);
		flush_dcache_page(page);
		SetPageUptodate(page);
		kunmap(page);
	}
	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;

	inode->i_data.a_ops->writepage(page, &udf_wbc);
	page_cache_release(page);

	mark_inode_dirty(inode);
}
195 struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
196 int *err)
198 int newblock;
199 struct buffer_head *dbh = NULL;
200 struct kernel_lb_addr eloc;
201 uint8_t alloctype;
202 struct extent_position epos;
204 struct udf_fileident_bh sfibh, dfibh;
205 loff_t f_pos = udf_ext0_offset(inode);
206 int size = udf_ext0_offset(inode) + inode->i_size;
207 struct fileIdentDesc cfi, *sfi, *dfi;
208 struct udf_inode_info *iinfo = UDF_I(inode);
210 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
211 alloctype = ICBTAG_FLAG_AD_SHORT;
212 else
213 alloctype = ICBTAG_FLAG_AD_LONG;
215 if (!inode->i_size) {
216 iinfo->i_alloc_type = alloctype;
217 mark_inode_dirty(inode);
218 return NULL;
221 /* alloc block, and copy data to it */
222 *block = udf_new_block(inode->i_sb, inode,
223 iinfo->i_location.partitionReferenceNum,
224 iinfo->i_location.logicalBlockNum, err);
225 if (!(*block))
226 return NULL;
227 newblock = udf_get_pblock(inode->i_sb, *block,
228 iinfo->i_location.partitionReferenceNum,
230 if (!newblock)
231 return NULL;
232 dbh = udf_tgetblk(inode->i_sb, newblock);
233 if (!dbh)
234 return NULL;
235 lock_buffer(dbh);
236 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
237 set_buffer_uptodate(dbh);
238 unlock_buffer(dbh);
239 mark_buffer_dirty_inode(dbh, inode);
241 sfibh.soffset = sfibh.eoffset =
242 f_pos & (inode->i_sb->s_blocksize - 1);
243 sfibh.sbh = sfibh.ebh = NULL;
244 dfibh.soffset = dfibh.eoffset = 0;
245 dfibh.sbh = dfibh.ebh = dbh;
246 while (f_pos < size) {
247 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
248 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
249 NULL, NULL, NULL);
250 if (!sfi) {
251 brelse(dbh);
252 return NULL;
254 iinfo->i_alloc_type = alloctype;
255 sfi->descTag.tagLocation = cpu_to_le32(*block);
256 dfibh.soffset = dfibh.eoffset;
257 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
258 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
259 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
260 sfi->fileIdent +
261 le16_to_cpu(sfi->lengthOfImpUse))) {
262 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
263 brelse(dbh);
264 return NULL;
267 mark_buffer_dirty_inode(dbh, inode);
269 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
270 iinfo->i_lenAlloc);
271 iinfo->i_lenAlloc = 0;
272 eloc.logicalBlockNum = *block;
273 eloc.partitionReferenceNum =
274 iinfo->i_location.partitionReferenceNum;
275 iinfo->i_lenExtents = inode->i_size;
276 epos.bh = NULL;
277 epos.block = iinfo->i_location;
278 epos.offset = udf_file_entry_alloc_offset(inode);
279 udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
280 /* UniqueID stuff */
282 brelse(epos.bh);
283 mark_inode_dirty(inode);
284 return dbh;
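/*
 * get_block callback used by the generic buffer/page I/O paths above
 * (block_read_full_page, block_write_full_page, block_write_begin,
 * generic_block_bmap).  Without 'create' it only maps blocks that are
 * already allocated; with 'create' it allocates via inode_getblk() under
 * the BKL.
 */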
static int udf_get_block(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	int err, new;
	struct buffer_head *bh;
	sector_t phys = 0;
	struct udf_inode_info *iinfo;

	if (!create) {
		phys = udf_block_map(inode, block);
		if (phys)
			map_bh(bh_result, inode->i_sb, phys);
		return 0;
	}

	err = -EIO;
	new = 0;
	bh = NULL;

	lock_kernel();

	iinfo = UDF_I(inode);
	if (block == iinfo->i_next_alloc_block + 1) {
		iinfo->i_next_alloc_block++;
		iinfo->i_next_alloc_goal++;
	}

	err = 0;

	bh = inode_getblk(inode, block, &err, &phys, &new);
	BUG_ON(bh);
	if (err)
		goto abort;
	BUG_ON(!phys);

	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, phys);

abort:
	unlock_kernel();
	return err;
}
static struct buffer_head *udf_getblk(struct inode *inode, long block,
				      int create, int *err)
{
	struct buffer_head *bh;
	struct buffer_head dummy;

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	*err = udf_get_block(inode, block, &dummy, create);
	if (!*err && buffer_mapped(&dummy)) {
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy)) {
			lock_buffer(bh);
			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
			unlock_buffer(bh);
			mark_buffer_dirty_inode(bh, inode);
		}
		return bh;
	}

	return NULL;
}
355 /* Extend the file by 'blocks' blocks, return the number of extents added */
356 int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
357 struct kernel_long_ad *last_ext, sector_t blocks)
359 sector_t add;
360 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
361 struct super_block *sb = inode->i_sb;
362 struct kernel_lb_addr prealloc_loc = {};
363 int prealloc_len = 0;
364 struct udf_inode_info *iinfo;
366 /* The previous extent is fake and we should not extend by anything
367 * - there's nothing to do... */
368 if (!blocks && fake)
369 return 0;
371 iinfo = UDF_I(inode);
372 /* Round the last extent up to a multiple of block size */
373 if (last_ext->extLength & (sb->s_blocksize - 1)) {
374 last_ext->extLength =
375 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
376 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
377 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
378 iinfo->i_lenExtents =
379 (iinfo->i_lenExtents + sb->s_blocksize - 1) &
380 ~(sb->s_blocksize - 1);
	/* Is the last extent just preallocated blocks? */
384 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
385 EXT_NOT_RECORDED_ALLOCATED) {
386 /* Save the extent so that we can reattach it to the end */
387 prealloc_loc = last_ext->extLocation;
388 prealloc_len = last_ext->extLength;
389 /* Mark the extent as a hole */
390 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
391 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
392 last_ext->extLocation.logicalBlockNum = 0;
393 last_ext->extLocation.partitionReferenceNum = 0;
396 /* Can we merge with the previous extent? */
397 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
398 EXT_NOT_RECORDED_NOT_ALLOCATED) {
399 add = ((1 << 30) - sb->s_blocksize -
400 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
401 sb->s_blocksize_bits;
402 if (add > blocks)
403 add = blocks;
404 blocks -= add;
405 last_ext->extLength += add << sb->s_blocksize_bits;
408 if (fake) {
409 udf_add_aext(inode, last_pos, &last_ext->extLocation,
410 last_ext->extLength, 1);
411 count++;
412 } else
413 udf_write_aext(inode, last_pos, &last_ext->extLocation,
414 last_ext->extLength, 1);
416 /* Managed to do everything necessary? */
417 if (!blocks)
418 goto out;
420 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
421 last_ext->extLocation.logicalBlockNum = 0;
422 last_ext->extLocation.partitionReferenceNum = 0;
423 add = (1 << (30-sb->s_blocksize_bits)) - 1;
424 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
425 (add << sb->s_blocksize_bits);
427 /* Create enough extents to cover the whole hole */
428 while (blocks > add) {
429 blocks -= add;
430 if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
431 last_ext->extLength, 1) == -1)
432 return -1;
433 count++;
435 if (blocks) {
436 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
437 (blocks << sb->s_blocksize_bits);
438 if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
439 last_ext->extLength, 1) == -1)
440 return -1;
441 count++;
444 out:
445 /* Do we have some preallocated blocks saved? */
446 if (prealloc_len) {
447 if (udf_add_aext(inode, last_pos, &prealloc_loc,
448 prealloc_len, 1) == -1)
449 return -1;
450 last_ext->extLocation = prealloc_loc;
451 last_ext->extLength = prealloc_len;
452 count++;
455 /* last_pos should point to the last written extent... */
456 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
457 last_pos->offset -= sizeof(struct short_ad);
458 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
459 last_pos->offset -= sizeof(struct long_ad);
460 else
461 return -1;
463 return count;
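/*
 * Map file block 'block' to a physical block, allocating one if necessary.
 * Allocation descriptors pack the extent type into the top two bits of
 * extLength (compare against EXT_* >> 30) and the byte length into the low
 * 30 bits (UDF_EXTENT_LENGTH_MASK); laarr[] holds the extents being split,
 * preallocated and merged before they are written back to disk.
 */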
466 static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
467 int *err, sector_t *phys, int *new)
469 static sector_t last_block;
470 struct buffer_head *result = NULL;
471 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
472 struct extent_position prev_epos, cur_epos, next_epos;
473 int count = 0, startnum = 0, endnum = 0;
474 uint32_t elen = 0, tmpelen;
475 struct kernel_lb_addr eloc, tmpeloc;
476 int c = 1;
477 loff_t lbcount = 0, b_off = 0;
478 uint32_t newblocknum, newblock;
479 sector_t offset = 0;
480 int8_t etype;
481 struct udf_inode_info *iinfo = UDF_I(inode);
482 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
483 int lastblock = 0;
485 prev_epos.offset = udf_file_entry_alloc_offset(inode);
486 prev_epos.block = iinfo->i_location;
487 prev_epos.bh = NULL;
488 cur_epos = next_epos = prev_epos;
489 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
491 /* find the extent which contains the block we are looking for.
492 alternate between laarr[0] and laarr[1] for locations of the
493 current extent, and the previous extent */
494 do {
495 if (prev_epos.bh != cur_epos.bh) {
496 brelse(prev_epos.bh);
497 get_bh(cur_epos.bh);
498 prev_epos.bh = cur_epos.bh;
500 if (cur_epos.bh != next_epos.bh) {
501 brelse(cur_epos.bh);
502 get_bh(next_epos.bh);
503 cur_epos.bh = next_epos.bh;
506 lbcount += elen;
508 prev_epos.block = cur_epos.block;
509 cur_epos.block = next_epos.block;
511 prev_epos.offset = cur_epos.offset;
512 cur_epos.offset = next_epos.offset;
514 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
515 if (etype == -1)
516 break;
518 c = !c;
520 laarr[c].extLength = (etype << 30) | elen;
521 laarr[c].extLocation = eloc;
523 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
524 pgoal = eloc.logicalBlockNum +
525 ((elen + inode->i_sb->s_blocksize - 1) >>
526 inode->i_sb->s_blocksize_bits);
528 count++;
529 } while (lbcount + elen <= b_off);
531 b_off -= lbcount;
532 offset = b_off >> inode->i_sb->s_blocksize_bits;
	/*
	 * Move prev_epos and cur_epos into indirect extent if we are at
	 * the pointer to it
	 */
537 udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
538 udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
540 /* if the extent is allocated and recorded, return the block
541 if the extent is not a multiple of the blocksize, round up */
543 if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
544 if (elen & (inode->i_sb->s_blocksize - 1)) {
545 elen = EXT_RECORDED_ALLOCATED |
546 ((elen + inode->i_sb->s_blocksize - 1) &
547 ~(inode->i_sb->s_blocksize - 1));
548 etype = udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
550 brelse(prev_epos.bh);
551 brelse(cur_epos.bh);
552 brelse(next_epos.bh);
553 newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
554 *phys = newblock;
555 return NULL;
558 last_block = block;
559 /* Are we beyond EOF? */
560 if (etype == -1) {
561 int ret;
563 if (count) {
564 if (c)
565 laarr[0] = laarr[1];
566 startnum = 1;
567 } else {
568 /* Create a fake extent when there's not one */
569 memset(&laarr[0].extLocation, 0x00,
570 sizeof(struct kernel_lb_addr));
571 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
572 /* Will udf_extend_file() create real extent from
573 a fake one? */
574 startnum = (offset > 0);
576 /* Create extents for the hole between EOF and offset */
577 ret = udf_extend_file(inode, &prev_epos, laarr, offset);
578 if (ret == -1) {
579 brelse(prev_epos.bh);
580 brelse(cur_epos.bh);
581 brelse(next_epos.bh);
582 /* We don't really know the error here so we just make
583 * something up */
584 *err = -ENOSPC;
585 return NULL;
587 c = 0;
588 offset = 0;
589 count += ret;
590 /* We are not covered by a preallocated extent? */
591 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
592 EXT_NOT_RECORDED_ALLOCATED) {
593 /* Is there any real extent? - otherwise we overwrite
594 * the fake one... */
595 if (count)
596 c = !c;
597 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
598 inode->i_sb->s_blocksize;
599 memset(&laarr[c].extLocation, 0x00,
600 sizeof(struct kernel_lb_addr));
601 count++;
602 endnum++;
604 endnum = c + 1;
605 lastblock = 1;
606 } else {
607 endnum = startnum = ((count > 2) ? 2 : count);
609 /* if the current extent is in position 0,
610 swap it with the previous */
611 if (!c && count != 1) {
612 laarr[2] = laarr[0];
613 laarr[0] = laarr[1];
614 laarr[1] = laarr[2];
615 c = 1;
618 /* if the current block is located in an extent,
619 read the next extent */
620 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
621 if (etype != -1) {
622 laarr[c + 1].extLength = (etype << 30) | elen;
623 laarr[c + 1].extLocation = eloc;
624 count++;
625 startnum++;
626 endnum++;
627 } else
628 lastblock = 1;
631 /* if the current extent is not recorded but allocated, get the
632 * block in the extent corresponding to the requested block */
633 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
634 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
635 else { /* otherwise, allocate a new block */
636 if (iinfo->i_next_alloc_block == block)
637 goal = iinfo->i_next_alloc_goal;
639 if (!goal) {
640 if (!(goal = pgoal)) /* XXX: what was intended here? */
641 goal = iinfo->i_location.logicalBlockNum + 1;
644 newblocknum = udf_new_block(inode->i_sb, inode,
645 iinfo->i_location.partitionReferenceNum,
646 goal, err);
647 if (!newblocknum) {
648 brelse(prev_epos.bh);
649 *err = -ENOSPC;
650 return NULL;
652 iinfo->i_lenExtents += inode->i_sb->s_blocksize;
	/* if the extent the requested block is located in contains multiple
	 * blocks, split the extent into at most three extents: blocks prior
	 * to the requested block, the requested block itself, and blocks
	 * after the requested block */
659 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
661 #ifdef UDF_PREALLOCATE
662 /* We preallocate blocks only for regular files. It also makes sense
663 * for directories but there's a problem when to drop the
664 * preallocation. We might use some delayed work for that but I feel
665 * it's overengineering for a filesystem like UDF. */
666 if (S_ISREG(inode->i_mode))
667 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
668 #endif
670 /* merge any continuous blocks in laarr */
671 udf_merge_extents(inode, laarr, &endnum);
673 /* write back the new extents, inserting new extents if the new number
674 * of extents is greater than the old number, and deleting extents if
675 * the new number of extents is less than the old number */
676 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
678 brelse(prev_epos.bh);
680 newblock = udf_get_pblock(inode->i_sb, newblocknum,
681 iinfo->i_location.partitionReferenceNum, 0);
682 if (!newblock)
683 return NULL;
684 *phys = newblock;
685 *err = 0;
686 *new = 1;
687 iinfo->i_next_alloc_block = block;
688 iinfo->i_next_alloc_goal = newblocknum;
689 inode->i_ctime = current_fs_time(inode->i_sb);
691 if (IS_SYNC(inode))
692 udf_sync_inode(inode);
693 else
694 mark_inode_dirty(inode);
696 return result;
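/*
 * Split the (not recorded) extent containing the newly mapped block into up
 * to three pieces: the blocks before it, the single recorded block itself,
 * and the blocks after it, updating *c and *endnum accordingly.
 */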
699 static void udf_split_extents(struct inode *inode, int *c, int offset,
700 int newblocknum,
701 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
702 int *endnum)
704 unsigned long blocksize = inode->i_sb->s_blocksize;
705 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
707 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
708 (laarr[*c].extLength >> 30) ==
709 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
710 int curr = *c;
711 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
712 blocksize - 1) >> blocksize_bits;
713 int8_t etype = (laarr[curr].extLength >> 30);
715 if (blen == 1)
717 else if (!offset || blen == offset + 1) {
718 laarr[curr + 2] = laarr[curr + 1];
719 laarr[curr + 1] = laarr[curr];
720 } else {
721 laarr[curr + 3] = laarr[curr + 1];
722 laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
725 if (offset) {
726 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
727 udf_free_blocks(inode->i_sb, inode,
728 &laarr[curr].extLocation,
729 0, offset);
730 laarr[curr].extLength =
731 EXT_NOT_RECORDED_NOT_ALLOCATED |
732 (offset << blocksize_bits);
733 laarr[curr].extLocation.logicalBlockNum = 0;
734 laarr[curr].extLocation.
735 partitionReferenceNum = 0;
736 } else
737 laarr[curr].extLength = (etype << 30) |
738 (offset << blocksize_bits);
739 curr++;
740 (*c)++;
741 (*endnum)++;
744 laarr[curr].extLocation.logicalBlockNum = newblocknum;
745 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
746 laarr[curr].extLocation.partitionReferenceNum =
747 UDF_I(inode)->i_location.partitionReferenceNum;
748 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
749 blocksize;
750 curr++;
752 if (blen != offset + 1) {
753 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
754 laarr[curr].extLocation.logicalBlockNum +=
755 offset + 1;
756 laarr[curr].extLength = (etype << 30) |
757 ((blen - (offset + 1)) << blocksize_bits);
758 curr++;
759 (*endnum)++;
764 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
765 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
766 int *endnum)
768 int start, length = 0, currlength = 0, i;
770 if (*endnum >= (c + 1)) {
771 if (!lastblock)
772 return;
773 else
774 start = c;
775 } else {
776 if ((laarr[c + 1].extLength >> 30) ==
777 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
778 start = c + 1;
779 length = currlength =
780 (((laarr[c + 1].extLength &
781 UDF_EXTENT_LENGTH_MASK) +
782 inode->i_sb->s_blocksize - 1) >>
783 inode->i_sb->s_blocksize_bits);
784 } else
785 start = c;
788 for (i = start + 1; i <= *endnum; i++) {
789 if (i == *endnum) {
790 if (lastblock)
791 length += UDF_DEFAULT_PREALLOC_BLOCKS;
792 } else if ((laarr[i].extLength >> 30) ==
793 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
794 length += (((laarr[i].extLength &
795 UDF_EXTENT_LENGTH_MASK) +
796 inode->i_sb->s_blocksize - 1) >>
797 inode->i_sb->s_blocksize_bits);
798 } else
799 break;
802 if (length) {
803 int next = laarr[start].extLocation.logicalBlockNum +
804 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
805 inode->i_sb->s_blocksize - 1) >>
806 inode->i_sb->s_blocksize_bits);
807 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
808 laarr[start].extLocation.partitionReferenceNum,
809 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
810 length : UDF_DEFAULT_PREALLOC_BLOCKS) -
811 currlength);
812 if (numalloc) {
813 if (start == (c + 1))
814 laarr[start].extLength +=
815 (numalloc <<
816 inode->i_sb->s_blocksize_bits);
817 else {
818 memmove(&laarr[c + 2], &laarr[c + 1],
819 sizeof(struct long_ad) * (*endnum - (c + 1)));
820 (*endnum)++;
821 laarr[c + 1].extLocation.logicalBlockNum = next;
822 laarr[c + 1].extLocation.partitionReferenceNum =
823 laarr[c].extLocation.
824 partitionReferenceNum;
825 laarr[c + 1].extLength =
826 EXT_NOT_RECORDED_ALLOCATED |
827 (numalloc <<
828 inode->i_sb->s_blocksize_bits);
829 start = c + 1;
832 for (i = start + 1; numalloc && i < *endnum; i++) {
833 int elen = ((laarr[i].extLength &
834 UDF_EXTENT_LENGTH_MASK) +
835 inode->i_sb->s_blocksize - 1) >>
836 inode->i_sb->s_blocksize_bits;
838 if (elen > numalloc) {
839 laarr[i].extLength -=
840 (numalloc <<
841 inode->i_sb->s_blocksize_bits);
842 numalloc = 0;
843 } else {
844 numalloc -= elen;
845 if (*endnum > (i + 1))
846 memmove(&laarr[i],
847 &laarr[i + 1],
848 sizeof(struct long_ad) *
849 (*endnum - (i + 1)));
850 i--;
851 (*endnum)--;
854 UDF_I(inode)->i_lenExtents +=
855 numalloc << inode->i_sb->s_blocksize_bits;
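/*
 * Coalesce neighbouring laarr[] entries: two adjacent extents of the same
 * type are merged when they are holes or physically contiguous, observing
 * the 30-bit extent length limit; blocks that were allocated but never
 * recorded are freed again and left as not-recorded-not-allocated holes.
 */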
860 static void udf_merge_extents(struct inode *inode,
861 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
862 int *endnum)
864 int i;
865 unsigned long blocksize = inode->i_sb->s_blocksize;
866 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
868 for (i = 0; i < (*endnum - 1); i++) {
869 struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
870 struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
872 if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
873 (((li->extLength >> 30) ==
874 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
875 ((lip1->extLocation.logicalBlockNum -
876 li->extLocation.logicalBlockNum) ==
877 (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
878 blocksize - 1) >> blocksize_bits)))) {
880 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
881 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
882 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
883 lip1->extLength = (lip1->extLength -
884 (li->extLength &
885 UDF_EXTENT_LENGTH_MASK) +
886 UDF_EXTENT_LENGTH_MASK) &
887 ~(blocksize - 1);
888 li->extLength = (li->extLength &
889 UDF_EXTENT_FLAG_MASK) +
890 (UDF_EXTENT_LENGTH_MASK + 1) -
891 blocksize;
892 lip1->extLocation.logicalBlockNum =
893 li->extLocation.logicalBlockNum +
894 ((li->extLength &
895 UDF_EXTENT_LENGTH_MASK) >>
896 blocksize_bits);
897 } else {
898 li->extLength = lip1->extLength +
899 (((li->extLength &
900 UDF_EXTENT_LENGTH_MASK) +
901 blocksize - 1) & ~(blocksize - 1));
902 if (*endnum > (i + 2))
903 memmove(&laarr[i + 1], &laarr[i + 2],
904 sizeof(struct long_ad) *
905 (*endnum - (i + 2)));
906 i--;
907 (*endnum)--;
909 } else if (((li->extLength >> 30) ==
910 (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
911 ((lip1->extLength >> 30) ==
912 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
913 udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
914 ((li->extLength &
915 UDF_EXTENT_LENGTH_MASK) +
916 blocksize - 1) >> blocksize_bits);
917 li->extLocation.logicalBlockNum = 0;
918 li->extLocation.partitionReferenceNum = 0;
920 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
921 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
922 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
923 lip1->extLength = (lip1->extLength -
924 (li->extLength &
925 UDF_EXTENT_LENGTH_MASK) +
926 UDF_EXTENT_LENGTH_MASK) &
927 ~(blocksize - 1);
928 li->extLength = (li->extLength &
929 UDF_EXTENT_FLAG_MASK) +
930 (UDF_EXTENT_LENGTH_MASK + 1) -
931 blocksize;
932 } else {
933 li->extLength = lip1->extLength +
934 (((li->extLength &
935 UDF_EXTENT_LENGTH_MASK) +
936 blocksize - 1) & ~(blocksize - 1));
937 if (*endnum > (i + 2))
938 memmove(&laarr[i + 1], &laarr[i + 2],
939 sizeof(struct long_ad) *
940 (*endnum - (i + 2)));
941 i--;
942 (*endnum)--;
944 } else if ((li->extLength >> 30) ==
945 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
946 udf_free_blocks(inode->i_sb, inode,
947 &li->extLocation, 0,
948 ((li->extLength &
949 UDF_EXTENT_LENGTH_MASK) +
950 blocksize - 1) >> blocksize_bits);
951 li->extLocation.logicalBlockNum = 0;
952 li->extLocation.partitionReferenceNum = 0;
953 li->extLength = (li->extLength &
954 UDF_EXTENT_LENGTH_MASK) |
955 EXT_NOT_RECORDED_NOT_ALLOCATED;
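/*
 * Write the first 'endnum' entries of laarr[] back to the on-disk
 * allocation descriptors at *epos, deleting descriptors if the extent count
 * shrank (startnum > endnum) or inserting new ones if it grew.
 */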
960 static void udf_update_extents(struct inode *inode,
961 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
962 int startnum, int endnum,
963 struct extent_position *epos)
965 int start = 0, i;
966 struct kernel_lb_addr tmploc;
967 uint32_t tmplen;
969 if (startnum > endnum) {
970 for (i = 0; i < (startnum - endnum); i++)
971 udf_delete_aext(inode, *epos, laarr[i].extLocation,
972 laarr[i].extLength);
973 } else if (startnum < endnum) {
974 for (i = 0; i < (endnum - startnum); i++) {
975 udf_insert_aext(inode, *epos, laarr[i].extLocation,
976 laarr[i].extLength);
977 udf_next_aext(inode, epos, &laarr[i].extLocation,
978 &laarr[i].extLength, 1);
979 start++;
983 for (i = start; i < endnum; i++) {
984 udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
985 udf_write_aext(inode, epos, &laarr[i].extLocation,
986 laarr[i].extLength, 1);
struct buffer_head *udf_bread(struct inode *inode, int block,
			      int create, int *err)
{
	struct buffer_head *bh = NULL;

	bh = udf_getblk(inode, block, create, err);
	if (!bh)
		return NULL;

	if (buffer_uptodate(bh))
		return bh;

	ll_rw_block(READ, 1, &bh);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);
	*err = -EIO;
	return NULL;
}
1013 void udf_truncate(struct inode *inode)
1015 int offset;
1016 int err;
1017 struct udf_inode_info *iinfo;
1019 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1020 S_ISLNK(inode->i_mode)))
1021 return;
1022 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1023 return;
1025 lock_kernel();
1026 iinfo = UDF_I(inode);
1027 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1028 if (inode->i_sb->s_blocksize <
1029 (udf_file_entry_alloc_offset(inode) +
1030 inode->i_size)) {
1031 udf_expand_file_adinicb(inode, inode->i_size, &err);
1032 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1033 inode->i_size = iinfo->i_lenAlloc;
1034 unlock_kernel();
1035 return;
1036 } else
1037 udf_truncate_extents(inode);
1038 } else {
1039 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
1040 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
1041 0x00, inode->i_sb->s_blocksize -
1042 offset - udf_file_entry_alloc_offset(inode));
1043 iinfo->i_lenAlloc = inode->i_size;
1045 } else {
1046 block_truncate_page(inode->i_mapping, inode->i_size,
1047 udf_get_block);
1048 udf_truncate_extents(inode);
1051 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
1052 if (IS_SYNC(inode))
1053 udf_sync_inode(inode);
1054 else
1055 mark_inode_dirty(inode);
1056 unlock_kernel();
1059 static void __udf_read_inode(struct inode *inode)
1061 struct buffer_head *bh = NULL;
1062 struct fileEntry *fe;
1063 uint16_t ident;
1064 struct udf_inode_info *iinfo = UDF_I(inode);
	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_no = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
1078 bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
1079 if (!bh) {
1080 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1081 inode->i_ino);
1082 make_bad_inode(inode);
1083 return;
1086 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1087 ident != TAG_IDENT_USE) {
1088 printk(KERN_ERR "udf: udf_read_inode(ino %ld) "
1089 "failed ident=%d\n", inode->i_ino, ident);
1090 brelse(bh);
1091 make_bad_inode(inode);
1092 return;
1095 fe = (struct fileEntry *)bh->b_data;
1097 if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1098 struct buffer_head *ibh;
1100 ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
1101 &ident);
1102 if (ident == TAG_IDENT_IE && ibh) {
1103 struct buffer_head *nbh = NULL;
1104 struct kernel_lb_addr loc;
1105 struct indirectEntry *ie;
1107 ie = (struct indirectEntry *)ibh->b_data;
1108 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1110 if (ie->indirectICB.extLength &&
1111 (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
1112 &ident))) {
1113 if (ident == TAG_IDENT_FE ||
1114 ident == TAG_IDENT_EFE) {
1115 memcpy(&iinfo->i_location,
1116 &loc,
1117 sizeof(struct kernel_lb_addr));
1118 brelse(bh);
1119 brelse(ibh);
1120 brelse(nbh);
1121 __udf_read_inode(inode);
1122 return;
1124 brelse(nbh);
1127 brelse(ibh);
1128 } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1129 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1130 le16_to_cpu(fe->icbTag.strategyType));
1131 brelse(bh);
1132 make_bad_inode(inode);
1133 return;
1135 udf_fill_inode(inode, bh);
1137 brelse(bh);
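/*
 * Parse the (extended) file entry or unallocated space entry in 'bh' into
 * the in-core inode: allocation type, permissions, link count, timestamps,
 * and the i_op/i_fop/a_ops appropriate for the ICB file type.
 */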
1140 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1142 struct fileEntry *fe;
1143 struct extendedFileEntry *efe;
1144 int offset;
1145 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1146 struct udf_inode_info *iinfo = UDF_I(inode);
1148 fe = (struct fileEntry *)bh->b_data;
1149 efe = (struct extendedFileEntry *)bh->b_data;
1151 if (fe->icbTag.strategyType == cpu_to_le16(4))
1152 iinfo->i_strat4096 = 0;
1153 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1154 iinfo->i_strat4096 = 1;
1156 iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
1157 ICBTAG_FLAG_AD_MASK;
1158 iinfo->i_unique = 0;
1159 iinfo->i_lenEAttr = 0;
1160 iinfo->i_lenExtents = 0;
1161 iinfo->i_lenAlloc = 0;
1162 iinfo->i_next_alloc_block = 0;
1163 iinfo->i_next_alloc_goal = 0;
1164 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1165 iinfo->i_efe = 1;
1166 iinfo->i_use = 0;
1167 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1168 sizeof(struct extendedFileEntry))) {
1169 make_bad_inode(inode);
1170 return;
1172 memcpy(iinfo->i_ext.i_data,
1173 bh->b_data + sizeof(struct extendedFileEntry),
1174 inode->i_sb->s_blocksize -
1175 sizeof(struct extendedFileEntry));
1176 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1177 iinfo->i_efe = 0;
1178 iinfo->i_use = 0;
1179 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1180 sizeof(struct fileEntry))) {
1181 make_bad_inode(inode);
1182 return;
1184 memcpy(iinfo->i_ext.i_data,
1185 bh->b_data + sizeof(struct fileEntry),
1186 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1187 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1188 iinfo->i_efe = 0;
1189 iinfo->i_use = 1;
1190 iinfo->i_lenAlloc = le32_to_cpu(
1191 ((struct unallocSpaceEntry *)bh->b_data)->
1192 lengthAllocDescs);
1193 if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
1194 sizeof(struct unallocSpaceEntry))) {
1195 make_bad_inode(inode);
1196 return;
1198 memcpy(iinfo->i_ext.i_data,
1199 bh->b_data + sizeof(struct unallocSpaceEntry),
1200 inode->i_sb->s_blocksize -
1201 sizeof(struct unallocSpaceEntry));
1202 return;
1205 inode->i_uid = le32_to_cpu(fe->uid);
1206 if (inode->i_uid == -1 ||
1207 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
1208 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
1209 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1211 inode->i_gid = le32_to_cpu(fe->gid);
1212 if (inode->i_gid == -1 ||
1213 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
1214 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
1215 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1217 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1218 if (!inode->i_nlink)
1219 inode->i_nlink = 1;
1221 inode->i_size = le64_to_cpu(fe->informationLength);
1222 iinfo->i_lenExtents = inode->i_size;
1224 if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
1225 sbi->s_fmode != UDF_INVALID_MODE)
1226 inode->i_mode = sbi->s_fmode;
1227 else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
1228 sbi->s_dmode != UDF_INVALID_MODE)
1229 inode->i_mode = sbi->s_dmode;
1230 else
1231 inode->i_mode = udf_convert_permissions(fe);
1232 inode->i_mode &= ~sbi->s_umask;
1234 if (iinfo->i_efe == 0) {
1235 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1236 (inode->i_sb->s_blocksize_bits - 9);
1238 if (!udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime))
1239 inode->i_atime = sbi->s_record_time;
1241 if (!udf_disk_stamp_to_time(&inode->i_mtime,
1242 fe->modificationTime))
1243 inode->i_mtime = sbi->s_record_time;
1245 if (!udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime))
1246 inode->i_ctime = sbi->s_record_time;
1248 iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1249 iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
1250 iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
1251 offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr;
1252 } else {
1253 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1254 (inode->i_sb->s_blocksize_bits - 9);
1256 if (!udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime))
1257 inode->i_atime = sbi->s_record_time;
1259 if (!udf_disk_stamp_to_time(&inode->i_mtime,
1260 efe->modificationTime))
1261 inode->i_mtime = sbi->s_record_time;
1263 if (!udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime))
1264 iinfo->i_crtime = sbi->s_record_time;
1266 if (!udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime))
1267 inode->i_ctime = sbi->s_record_time;
1269 iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1270 iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
1271 iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
1272 offset = sizeof(struct extendedFileEntry) +
1273 iinfo->i_lenEAttr;
1276 switch (fe->icbTag.fileType) {
1277 case ICBTAG_FILE_TYPE_DIRECTORY:
1278 inode->i_op = &udf_dir_inode_operations;
1279 inode->i_fop = &udf_dir_operations;
1280 inode->i_mode |= S_IFDIR;
1281 inc_nlink(inode);
1282 break;
1283 case ICBTAG_FILE_TYPE_REALTIME:
1284 case ICBTAG_FILE_TYPE_REGULAR:
1285 case ICBTAG_FILE_TYPE_UNDEF:
1286 case ICBTAG_FILE_TYPE_VAT20:
1287 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1288 inode->i_data.a_ops = &udf_adinicb_aops;
1289 else
1290 inode->i_data.a_ops = &udf_aops;
1291 inode->i_op = &udf_file_inode_operations;
1292 inode->i_fop = &udf_file_operations;
1293 inode->i_mode |= S_IFREG;
1294 break;
1295 case ICBTAG_FILE_TYPE_BLOCK:
1296 inode->i_mode |= S_IFBLK;
1297 break;
1298 case ICBTAG_FILE_TYPE_CHAR:
1299 inode->i_mode |= S_IFCHR;
1300 break;
1301 case ICBTAG_FILE_TYPE_FIFO:
1302 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1303 break;
1304 case ICBTAG_FILE_TYPE_SOCKET:
1305 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1306 break;
1307 case ICBTAG_FILE_TYPE_SYMLINK:
1308 inode->i_data.a_ops = &udf_symlink_aops;
1309 inode->i_op = &udf_symlink_inode_operations;
1310 inode->i_mode = S_IFLNK | S_IRWXUGO;
1311 break;
1312 case ICBTAG_FILE_TYPE_MAIN:
1313 udf_debug("METADATA FILE-----\n");
1314 break;
1315 case ICBTAG_FILE_TYPE_MIRROR:
1316 udf_debug("METADATA MIRROR FILE-----\n");
1317 break;
1318 case ICBTAG_FILE_TYPE_BITMAP:
1319 udf_debug("METADATA BITMAP FILE-----\n");
1320 break;
1321 default:
1322 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown "
1323 "file type=%d\n", inode->i_ino,
1324 fe->icbTag.fileType);
1325 make_bad_inode(inode);
1326 return;
1328 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1329 struct deviceSpec *dsea =
1330 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1331 if (dsea) {
1332 init_special_inode(inode, inode->i_mode,
1333 MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1334 le32_to_cpu(dsea->minorDeviceIdent)));
1335 /* Developer ID ??? */
1336 } else
1337 make_bad_inode(inode);
static int udf_alloc_i_data(struct inode *inode, size_t size)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
	if (!iinfo->i_ext.i_data) {
		printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) "
		       "no free memory\n", inode->i_ino);
		return -ENOMEM;
	}

	return 0;
}
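
/*
 * UDF stores POSIX-style permissions in fe->permissions as other/group/owner
 * groups spaced 5 bits apart (the extra bits carry UDF's chattr and delete
 * permissions), so the group and owner triples are shifted down by 2 and 4
 * bits to line up with S_IRWXG/S_IRWXU; setuid/setgid/sticky come from
 * icbTag.flags.
 */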
static mode_t udf_convert_permissions(struct fileEntry *fe)
{
	mode_t mode;
	uint32_t permissions;
	uint32_t flags;

	permissions = le32_to_cpu(fe->permissions);
	flags = le16_to_cpu(fe->icbTag.flags);

	mode =	((permissions) & S_IRWXO) |
		((permissions >> 2) & S_IRWXG) |
		((permissions >> 4) & S_IRWXU) |
		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);

	return mode;
}
int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	lock_kernel();
	ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
	unlock_kernel();

	return ret;
}

int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, 1);
}
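
/*
 * Build the on-disk file entry / extended file entry (or unallocated space
 * entry) from the in-core inode, recompute the descriptor tag CRC and
 * checksum, and mark the buffer dirty; with do_sync the buffer is written
 * out synchronously and I/O errors are reported.
 */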
1390 static int udf_update_inode(struct inode *inode, int do_sync)
1392 struct buffer_head *bh = NULL;
1393 struct fileEntry *fe;
1394 struct extendedFileEntry *efe;
1395 uint32_t udfperms;
1396 uint16_t icbflags;
1397 uint16_t crclen;
1398 int err = 0;
1399 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1400 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1401 struct udf_inode_info *iinfo = UDF_I(inode);
1403 bh = udf_tgetblk(inode->i_sb,
1404 udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
1405 if (!bh) {
1406 udf_debug("getblk failure\n");
1407 return -ENOMEM;
1410 lock_buffer(bh);
1411 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1412 fe = (struct fileEntry *)bh->b_data;
1413 efe = (struct extendedFileEntry *)bh->b_data;
1415 if (iinfo->i_use) {
1416 struct unallocSpaceEntry *use =
1417 (struct unallocSpaceEntry *)bh->b_data;
1419 use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1420 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
1421 iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1422 sizeof(struct unallocSpaceEntry));
1423 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
1424 use->descTag.tagLocation =
1425 cpu_to_le32(iinfo->i_location.logicalBlockNum);
1426 crclen = sizeof(struct unallocSpaceEntry) +
1427 iinfo->i_lenAlloc - sizeof(struct tag);
1428 use->descTag.descCRCLength = cpu_to_le16(crclen);
1429 use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
1430 sizeof(struct tag),
1431 crclen));
1432 use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
1434 goto out;
1437 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1438 fe->uid = cpu_to_le32(-1);
1439 else
1440 fe->uid = cpu_to_le32(inode->i_uid);
1442 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1443 fe->gid = cpu_to_le32(-1);
1444 else
1445 fe->gid = cpu_to_le32(inode->i_gid);
1447 udfperms = ((inode->i_mode & S_IRWXO)) |
1448 ((inode->i_mode & S_IRWXG) << 2) |
1449 ((inode->i_mode & S_IRWXU) << 4);
1451 udfperms |= (le32_to_cpu(fe->permissions) &
1452 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1453 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1454 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1455 fe->permissions = cpu_to_le32(udfperms);
1457 if (S_ISDIR(inode->i_mode))
1458 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1459 else
1460 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1462 fe->informationLength = cpu_to_le64(inode->i_size);
1464 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1465 struct regid *eid;
1466 struct deviceSpec *dsea =
1467 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1468 if (!dsea) {
1469 dsea = (struct deviceSpec *)
1470 udf_add_extendedattr(inode,
1471 sizeof(struct deviceSpec) +
1472 sizeof(struct regid), 12, 0x3);
1473 dsea->attrType = cpu_to_le32(12);
1474 dsea->attrSubtype = 1;
1475 dsea->attrLength = cpu_to_le32(
1476 sizeof(struct deviceSpec) +
1477 sizeof(struct regid));
1478 dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
1480 eid = (struct regid *)dsea->impUse;
1481 memset(eid, 0, sizeof(struct regid));
1482 strcpy(eid->ident, UDF_ID_DEVELOPER);
1483 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1484 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1485 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1486 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1489 if (iinfo->i_efe == 0) {
1490 memcpy(bh->b_data + sizeof(struct fileEntry),
1491 iinfo->i_ext.i_data,
1492 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1493 fe->logicalBlocksRecorded = cpu_to_le64(
1494 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1495 (blocksize_bits - 9));
1497 udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
1498 udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
1499 udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
1500 memset(&(fe->impIdent), 0, sizeof(struct regid));
1501 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1502 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1503 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1504 fe->uniqueID = cpu_to_le64(iinfo->i_unique);
1505 fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1506 fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1507 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1508 crclen = sizeof(struct fileEntry);
1509 } else {
1510 memcpy(bh->b_data + sizeof(struct extendedFileEntry),
1511 iinfo->i_ext.i_data,
1512 inode->i_sb->s_blocksize -
1513 sizeof(struct extendedFileEntry));
1514 efe->objectSize = cpu_to_le64(inode->i_size);
1515 efe->logicalBlocksRecorded = cpu_to_le64(
1516 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1517 (blocksize_bits - 9));
1519 if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
1520 (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
1521 iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec))
1522 iinfo->i_crtime = inode->i_atime;
1524 if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec ||
1525 (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec &&
1526 iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec))
1527 iinfo->i_crtime = inode->i_mtime;
1529 if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec ||
1530 (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec &&
1531 iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec))
1532 iinfo->i_crtime = inode->i_ctime;
1534 udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
1535 udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
1536 udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
1537 udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
1539 memset(&(efe->impIdent), 0, sizeof(struct regid));
1540 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1541 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1542 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1543 efe->uniqueID = cpu_to_le64(iinfo->i_unique);
1544 efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1545 efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1546 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1547 crclen = sizeof(struct extendedFileEntry);
1549 if (iinfo->i_strat4096) {
1550 fe->icbTag.strategyType = cpu_to_le16(4096);
1551 fe->icbTag.strategyParameter = cpu_to_le16(1);
1552 fe->icbTag.numEntries = cpu_to_le16(2);
1553 } else {
1554 fe->icbTag.strategyType = cpu_to_le16(4);
1555 fe->icbTag.numEntries = cpu_to_le16(1);
1558 if (S_ISDIR(inode->i_mode))
1559 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1560 else if (S_ISREG(inode->i_mode))
1561 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1562 else if (S_ISLNK(inode->i_mode))
1563 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1564 else if (S_ISBLK(inode->i_mode))
1565 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1566 else if (S_ISCHR(inode->i_mode))
1567 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1568 else if (S_ISFIFO(inode->i_mode))
1569 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1570 else if (S_ISSOCK(inode->i_mode))
1571 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1573 icbflags = iinfo->i_alloc_type |
1574 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1575 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1576 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1577 (le16_to_cpu(fe->icbTag.flags) &
1578 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1579 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1581 fe->icbTag.flags = cpu_to_le16(icbflags);
1582 if (sbi->s_udfrev >= 0x0200)
1583 fe->descTag.descVersion = cpu_to_le16(3);
1584 else
1585 fe->descTag.descVersion = cpu_to_le16(2);
1586 fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
1587 fe->descTag.tagLocation = cpu_to_le32(
1588 iinfo->i_location.logicalBlockNum);
1589 crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
1590 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1591 fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
1592 crclen));
1593 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1595 out:
1596 set_buffer_uptodate(bh);
1597 unlock_buffer(bh);
1599 /* write the data blocks */
1600 mark_buffer_dirty(bh);
1601 if (do_sync) {
1602 sync_dirty_buffer(bh);
1603 if (buffer_write_io_error(bh)) {
1604 printk(KERN_WARNING "IO error syncing udf inode "
1605 "[%s:%08lx]\n", inode->i_sb->s_id,
1606 inode->i_ino);
1607 err = -EIO;
1610 brelse(bh);
1612 return err;
struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);

	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
		__udf_read_inode(inode);
		unlock_new_inode(inode);
	}

	if (is_bad_inode(inode))
		goto out_iput;

	if (ino->logicalBlockNum >= UDF_SB(sb)->
			s_partmaps[ino->partitionReferenceNum].s_partition_len) {
		udf_debug("block=%d, partition=%d out of range\n",
			  ino->logicalBlockNum, ino->partitionReferenceNum);
		make_bad_inode(inode);
		goto out_iput;
	}

	return inode;

out_iput:
	iput(inode);
	return NULL;
}
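
/*
 * Append one allocation descriptor at *epos.  When the current descriptor
 * area is full, a new Allocation Extent Descriptor block is allocated and
 * chained in with an EXT_NEXT_EXTENT_ALLOCDECS pointer before the new
 * short_ad/long_ad is written via udf_write_aext().
 */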
1647 int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1648 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
1650 int adsize;
1651 struct short_ad *sad = NULL;
1652 struct long_ad *lad = NULL;
1653 struct allocExtDesc *aed;
1654 int8_t etype;
1655 uint8_t *ptr;
1656 struct udf_inode_info *iinfo = UDF_I(inode);
1658 if (!epos->bh)
1659 ptr = iinfo->i_ext.i_data + epos->offset -
1660 udf_file_entry_alloc_offset(inode) +
1661 iinfo->i_lenEAttr;
1662 else
1663 ptr = epos->bh->b_data + epos->offset;
1665 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1666 adsize = sizeof(struct short_ad);
1667 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1668 adsize = sizeof(struct long_ad);
1669 else
1670 return -1;
1672 if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
1673 unsigned char *sptr, *dptr;
1674 struct buffer_head *nbh;
1675 int err, loffset;
1676 struct kernel_lb_addr obloc = epos->block;
1678 epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1679 obloc.partitionReferenceNum,
1680 obloc.logicalBlockNum, &err);
1681 if (!epos->block.logicalBlockNum)
1682 return -1;
1683 nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1684 &epos->block,
1685 0));
1686 if (!nbh)
1687 return -1;
1688 lock_buffer(nbh);
1689 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1690 set_buffer_uptodate(nbh);
1691 unlock_buffer(nbh);
1692 mark_buffer_dirty_inode(nbh, inode);
1694 aed = (struct allocExtDesc *)(nbh->b_data);
1695 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1696 aed->previousAllocExtLocation =
1697 cpu_to_le32(obloc.logicalBlockNum);
1698 if (epos->offset + adsize > inode->i_sb->s_blocksize) {
1699 loffset = epos->offset;
1700 aed->lengthAllocDescs = cpu_to_le32(adsize);
1701 sptr = ptr - adsize;
1702 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1703 memcpy(dptr, sptr, adsize);
1704 epos->offset = sizeof(struct allocExtDesc) + adsize;
1705 } else {
1706 loffset = epos->offset + adsize;
1707 aed->lengthAllocDescs = cpu_to_le32(0);
1708 sptr = ptr;
1709 epos->offset = sizeof(struct allocExtDesc);
1711 if (epos->bh) {
1712 aed = (struct allocExtDesc *)epos->bh->b_data;
1713 le32_add_cpu(&aed->lengthAllocDescs, adsize);
1714 } else {
1715 iinfo->i_lenAlloc += adsize;
1716 mark_inode_dirty(inode);
1719 if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
1720 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1721 epos->block.logicalBlockNum, sizeof(struct tag));
1722 else
1723 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1724 epos->block.logicalBlockNum, sizeof(struct tag));
1725 switch (iinfo->i_alloc_type) {
1726 case ICBTAG_FLAG_AD_SHORT:
1727 sad = (struct short_ad *)sptr;
1728 sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1729 inode->i_sb->s_blocksize);
1730 sad->extPosition =
1731 cpu_to_le32(epos->block.logicalBlockNum);
1732 break;
1733 case ICBTAG_FLAG_AD_LONG:
1734 lad = (struct long_ad *)sptr;
1735 lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
1736 inode->i_sb->s_blocksize);
1737 lad->extLocation = cpu_to_lelb(epos->block);
1738 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1739 break;
1741 if (epos->bh) {
1742 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1743 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1744 udf_update_tag(epos->bh->b_data, loffset);
1745 else
1746 udf_update_tag(epos->bh->b_data,
1747 sizeof(struct allocExtDesc));
1748 mark_buffer_dirty_inode(epos->bh, inode);
1749 brelse(epos->bh);
1750 } else {
1751 mark_inode_dirty(inode);
1753 epos->bh = nbh;
1756 etype = udf_write_aext(inode, epos, eloc, elen, inc);
1758 if (!epos->bh) {
1759 iinfo->i_lenAlloc += adsize;
1760 mark_inode_dirty(inode);
1761 } else {
1762 aed = (struct allocExtDesc *)epos->bh->b_data;
1763 le32_add_cpu(&aed->lengthAllocDescs, adsize);
1764 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1765 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1766 udf_update_tag(epos->bh->b_data,
1767 epos->offset + (inc ? 0 : adsize));
1768 else
1769 udf_update_tag(epos->bh->b_data,
1770 sizeof(struct allocExtDesc));
1771 mark_buffer_dirty_inode(epos->bh, inode);
1774 return etype;
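/*
 * Write a single short_ad or long_ad describing (eloc, elen) at the current
 * position and, if inc is set, advance epos->offset past it; returns the
 * extent type taken from the top two bits of elen.
 */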
1777 int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1778 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
1780 int adsize;
1781 uint8_t *ptr;
1782 struct short_ad *sad;
1783 struct long_ad *lad;
1784 struct udf_inode_info *iinfo = UDF_I(inode);
1786 if (!epos->bh)
1787 ptr = iinfo->i_ext.i_data + epos->offset -
1788 udf_file_entry_alloc_offset(inode) +
1789 iinfo->i_lenEAttr;
1790 else
1791 ptr = epos->bh->b_data + epos->offset;
1793 switch (iinfo->i_alloc_type) {
1794 case ICBTAG_FLAG_AD_SHORT:
1795 sad = (struct short_ad *)ptr;
1796 sad->extLength = cpu_to_le32(elen);
1797 sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
1798 adsize = sizeof(struct short_ad);
1799 break;
1800 case ICBTAG_FLAG_AD_LONG:
1801 lad = (struct long_ad *)ptr;
1802 lad->extLength = cpu_to_le32(elen);
1803 lad->extLocation = cpu_to_lelb(*eloc);
1804 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1805 adsize = sizeof(struct long_ad);
1806 break;
1807 default:
1808 return -1;
1811 if (epos->bh) {
1812 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1813 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
1814 struct allocExtDesc *aed =
1815 (struct allocExtDesc *)epos->bh->b_data;
1816 udf_update_tag(epos->bh->b_data,
1817 le32_to_cpu(aed->lengthAllocDescs) +
1818 sizeof(struct allocExtDesc));
1820 mark_buffer_dirty_inode(epos->bh, inode);
1821 } else {
1822 mark_inode_dirty(inode);
1825 if (inc)
1826 epos->offset += adsize;
1828 return (elen >> 30);
1831 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1832 struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
1834 int8_t etype;
1836 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1837 (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
1838 int block;
1839 epos->block = *eloc;
1840 epos->offset = sizeof(struct allocExtDesc);
1841 brelse(epos->bh);
1842 block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
1843 epos->bh = udf_tread(inode->i_sb, block);
1844 if (!epos->bh) {
1845 udf_debug("reading block %d failed!\n", block);
1846 return -1;
1850 return etype;
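/*
 * Decode the allocation descriptor at epos->offset (short_ad or long_ad,
 * depending on the inode's allocation type) into *eloc/*elen and return its
 * extent type, or -1 once the descriptor area has been exhausted.
 */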
1853 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1854 struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
1856 int alen;
1857 int8_t etype;
1858 uint8_t *ptr;
1859 struct short_ad *sad;
1860 struct long_ad *lad;
1861 struct udf_inode_info *iinfo = UDF_I(inode);
1863 if (!epos->bh) {
1864 if (!epos->offset)
1865 epos->offset = udf_file_entry_alloc_offset(inode);
1866 ptr = iinfo->i_ext.i_data + epos->offset -
1867 udf_file_entry_alloc_offset(inode) +
1868 iinfo->i_lenEAttr;
1869 alen = udf_file_entry_alloc_offset(inode) +
1870 iinfo->i_lenAlloc;
1871 } else {
1872 if (!epos->offset)
1873 epos->offset = sizeof(struct allocExtDesc);
1874 ptr = epos->bh->b_data + epos->offset;
1875 alen = sizeof(struct allocExtDesc) +
1876 le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
1877 lengthAllocDescs);
1880 switch (iinfo->i_alloc_type) {
1881 case ICBTAG_FLAG_AD_SHORT:
1882 sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
1883 if (!sad)
1884 return -1;
1885 etype = le32_to_cpu(sad->extLength) >> 30;
1886 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1887 eloc->partitionReferenceNum =
1888 iinfo->i_location.partitionReferenceNum;
1889 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1890 break;
1891 case ICBTAG_FLAG_AD_LONG:
1892 lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
1893 if (!lad)
1894 return -1;
1895 etype = le32_to_cpu(lad->extLength) >> 30;
1896 *eloc = lelb_to_cpu(lad->extLocation);
1897 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1898 break;
1899 default:
1900 udf_debug("alloc_type = %d unsupported\n",
1901 iinfo->i_alloc_type);
1902 return -1;
1905 return etype;
1908 static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
1909 struct kernel_lb_addr neloc, uint32_t nelen)
1911 struct kernel_lb_addr oeloc;
1912 uint32_t oelen;
1913 int8_t etype;
1915 if (epos.bh)
1916 get_bh(epos.bh);
1918 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
1919 udf_write_aext(inode, &epos, &neloc, nelen, 1);
1920 neloc = oeloc;
1921 nelen = (etype << 30) | oelen;
1923 udf_add_aext(inode, &epos, &neloc, nelen, 1);
1924 brelse(epos.bh);
1926 return (nelen >> 30);
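/*
 * Remove the descriptor at 'epos' by rewriting every following descriptor
 * one slot back through udf_write_aext() and shrinking lengthAllocDescs /
 * i_lenAlloc; an allocation extent block that becomes empty is freed.
 */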
1929 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
1930 struct kernel_lb_addr eloc, uint32_t elen)
1932 struct extent_position oepos;
1933 int adsize;
1934 int8_t etype;
1935 struct allocExtDesc *aed;
1936 struct udf_inode_info *iinfo;
1938 if (epos.bh) {
1939 get_bh(epos.bh);
1940 get_bh(epos.bh);
1943 iinfo = UDF_I(inode);
1944 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1945 adsize = sizeof(struct short_ad);
1946 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1947 adsize = sizeof(struct long_ad);
1948 else
1949 adsize = 0;
1951 oepos = epos;
1952 if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
1953 return -1;
1955 while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
1956 udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
1957 if (oepos.bh != epos.bh) {
1958 oepos.block = epos.block;
1959 brelse(oepos.bh);
1960 get_bh(epos.bh);
1961 oepos.bh = epos.bh;
1962 oepos.offset = epos.offset - adsize;
1965 memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
1966 elen = 0;
1968 if (epos.bh != oepos.bh) {
1969 udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
1970 udf_write_aext(inode, &oepos, &eloc, elen, 1);
1971 udf_write_aext(inode, &oepos, &eloc, elen, 1);
1972 if (!oepos.bh) {
1973 iinfo->i_lenAlloc -= (adsize * 2);
1974 mark_inode_dirty(inode);
1975 } else {
1976 aed = (struct allocExtDesc *)oepos.bh->b_data;
1977 le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
1978 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1979 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1980 udf_update_tag(oepos.bh->b_data,
1981 oepos.offset - (2 * adsize));
1982 else
1983 udf_update_tag(oepos.bh->b_data,
1984 sizeof(struct allocExtDesc));
1985 mark_buffer_dirty_inode(oepos.bh, inode);
1987 } else {
1988 udf_write_aext(inode, &oepos, &eloc, elen, 1);
1989 if (!oepos.bh) {
1990 iinfo->i_lenAlloc -= adsize;
1991 mark_inode_dirty(inode);
1992 } else {
1993 aed = (struct allocExtDesc *)oepos.bh->b_data;
1994 le32_add_cpu(&aed->lengthAllocDescs, -adsize);
1995 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
1996 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
1997 udf_update_tag(oepos.bh->b_data,
1998 epos.offset - adsize);
1999 else
2000 udf_update_tag(oepos.bh->b_data,
2001 sizeof(struct allocExtDesc));
2002 mark_buffer_dirty_inode(oepos.bh, inode);
2006 brelse(epos.bh);
2007 brelse(oepos.bh);
2009 return (elen >> 30);
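/*
 * Walk the extent list until the extent containing 'block' is found,
 * returning its type and leaving *eloc/*elen/*offset describing where the
 * block sits inside that extent; returns -1 when the block lies beyond the
 * last extent (i.e. beyond EOF).
 */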
int8_t inode_bmap(struct inode *inode, sector_t block,
		  struct extent_position *pos, struct kernel_lb_addr *eloc,
		  uint32_t *elen, sector_t *offset)
{
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	loff_t lbcount = 0, bcount =
	    (loff_t) block << blocksize_bits;
	int8_t etype;
	struct udf_inode_info *iinfo;

	iinfo = UDF_I(inode);
	pos->offset = 0;
	pos->block = iinfo->i_location;
	pos->bh = NULL;
	*elen = 0;

	do {
		etype = udf_next_aext(inode, pos, eloc, elen, 1);
		if (etype == -1) {
			*offset = (bcount - lbcount) >> blocksize_bits;
			iinfo->i_lenExtents = lbcount;
			return -1;
		}
		lbcount += *elen;
	} while (lbcount <= bcount);

	*offset = (bcount + *elen - lbcount) >> blocksize_bits;

	return etype;
}

long udf_block_map(struct inode *inode, sector_t block)
{
	struct kernel_lb_addr eloc;
	uint32_t elen;
	sector_t offset;
	struct extent_position epos = {};
	int ret;

	lock_kernel();

	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
						(EXT_RECORDED_ALLOCATED >> 30))
		ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
	else
		ret = 0;

	unlock_kernel();
	brelse(epos.bh);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
		return udf_fixed_to_variable(ret);
	else
		return ret;
}