Linux-2.6.12-rc2
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / fs / udf / inode.c
blob0506e117378450ed524253b5c933bbed3dd27696
1 /*
2 * inode.c
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
7 * CONTACTS
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hpesjro.fc.hp.com
12 * COPYRIGHT
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2004 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
22 * HISTORY
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
36 #include "udfdecl.h"
37 #include <linux/mm.h>
38 #include <linux/smp_lock.h>
39 #include <linux/module.h>
40 #include <linux/pagemap.h>
41 #include <linux/buffer_head.h>
42 #include <linux/writeback.h>
43 #include <linux/slab.h>
45 #include "udf_i.h"
46 #include "udf_sb.h"
48 MODULE_AUTHOR("Ben Fennema");
49 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
50 MODULE_LICENSE("GPL");
52 #define EXTENT_MERGE_SIZE 5
54 static mode_t udf_convert_permissions(struct fileEntry *);
55 static int udf_update_inode(struct inode *, int);
56 static void udf_fill_inode(struct inode *, struct buffer_head *);
57 static struct buffer_head *inode_getblk(struct inode *, long, int *,
58 long *, int *);
59 static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
60 kernel_lb_addr, uint32_t, struct buffer_head *);
61 static void udf_split_extents(struct inode *, int *, int, int,
62 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
63 static void udf_prealloc_extents(struct inode *, int, int,
64 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
65 static void udf_merge_extents(struct inode *,
66 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
67 static void udf_update_extents(struct inode *,
68 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
69 kernel_lb_addr, uint32_t, struct buffer_head **);
70 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
73 * udf_delete_inode
75 * PURPOSE
76 * Clean-up before the specified inode is destroyed.
78 * DESCRIPTION
79 * This routine is called when the kernel destroys an inode structure
80 * ie. when iput() finds i_count == 0.
82 * HISTORY
83 * July 1, 1997 - Andrew E. Mileski
84 * Written, tested, and released.
86 * Called at the last iput() if i_nlink is zero.
88 void udf_delete_inode(struct inode * inode)
90 if (is_bad_inode(inode))
91 goto no_delete;
93 inode->i_size = 0;
94 udf_truncate(inode);
95 lock_kernel();
97 udf_update_inode(inode, IS_SYNC(inode));
98 udf_free_inode(inode);
100 unlock_kernel();
101 return;
102 no_delete:
103 clear_inode(inode);
106 void udf_clear_inode(struct inode *inode)
108 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
109 lock_kernel();
110 udf_discard_prealloc(inode);
111 unlock_kernel();
114 kfree(UDF_I_DATA(inode));
115 UDF_I_DATA(inode) = NULL;
118 static int udf_writepage(struct page *page, struct writeback_control *wbc)
120 return block_write_full_page(page, udf_get_block, wbc);
123 static int udf_readpage(struct file *file, struct page *page)
125 return block_read_full_page(page, udf_get_block);
128 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
130 return block_prepare_write(page, from, to, udf_get_block);
133 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
135 return generic_block_bmap(mapping,block,udf_get_block);
138 struct address_space_operations udf_aops = {
139 .readpage = udf_readpage,
140 .writepage = udf_writepage,
141 .sync_page = block_sync_page,
142 .prepare_write = udf_prepare_write,
143 .commit_write = generic_commit_write,
144 .bmap = udf_bmap,
/*
 * udf_expand_file_adinicb
 *
 * Convert a file whose data is stored inside the ICB (allocation type
 * AD_IN_ICB) to a normal extent-based file: copy the in-ICB data into
 * page 0 of the page cache, clear the in-ICB area, switch the allocation
 * type to short/long ADs, and write the page out so a real extent is
 * created.  @newsize and @err are currently unused by the visible body —
 * presumably kept for interface symmetry; TODO confirm against callers.
 */
void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
{
	struct page *page;
	char *kaddr;
	struct writeback_control udf_wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
	};

	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;

	if (!UDF_I_LENALLOC(inode))
	{
		/* No in-ICB data: just flip the allocation type. */
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
		else
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
		mark_inode_dirty(inode);
		return;
	}

	page = grab_cache_page(inode->i_mapping, 0);
	if (!PageLocked(page))
		PAGE_BUG(page);
	if (!PageUptodate(page))
	{
		/* Populate page 0 from the in-ICB data, zero-filling the tail. */
		kaddr = kmap(page);
		memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
		memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
			UDF_I_LENALLOC(inode));
		flush_dcache_page(page);
		SetPageUptodate(page);
		kunmap(page);
	}
	/* Wipe the old in-ICB copy and switch to extent-based allocation. */
	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
		UDF_I_LENALLOC(inode));
	UDF_I_LENALLOC(inode) = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
	else
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;

	/* Force the page out so a real extent gets allocated on disk. */
	inode->i_data.a_ops->writepage(page, &udf_wbc);
	page_cache_release(page);

	mark_inode_dirty(inode);
}
/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory stored in-ICB into a one-block extent-based
 * directory: allocate a fresh block, copy every file-identifier
 * descriptor out of the ICB into it (rewriting each descTag location),
 * then record the block as the directory's single extent.
 *
 * Returns the buffer_head of the new directory block (caller releases),
 * with *block set to the new logical block; NULL on failure with *err
 * set by udf_new_block() where applicable.
 */
struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
{
	int newblock;
	struct buffer_head *sbh = NULL, *dbh = NULL;
	kernel_lb_addr bloc, eloc;
	uint32_t elen, extoffset;
	uint8_t alloctype;

	struct udf_fileident_bh sfibh, dfibh;
	/* f_pos/size count 4-byte units, per the >> 2 convention used here. */
	loff_t f_pos = udf_ext0_offset(inode) >> 2;
	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
	struct fileIdentDesc cfi, *sfi, *dfi;

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		alloctype = ICBTAG_FLAG_AD_SHORT;
	else
		alloctype = ICBTAG_FLAG_AD_LONG;

	if (!inode->i_size)
	{
		/* Empty directory: only the allocation type changes. */
		UDF_I_ALLOCTYPE(inode) = alloctype;
		mark_inode_dirty(inode);
		return NULL;
	}

	/* alloc block, and copy data to it */
	*block = udf_new_block(inode->i_sb, inode,
		UDF_I_LOCATION(inode).partitionReferenceNum,
		UDF_I_LOCATION(inode).logicalBlockNum, err);

	if (!(*block))
		return NULL;
	newblock = udf_get_pblock(inode->i_sb, *block,
		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
	if (!newblock)
		return NULL;
	dbh = udf_tgetblk(inode->i_sb, newblock);
	if (!dbh)
		return NULL;
	lock_buffer(dbh);
	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
	set_buffer_uptodate(dbh);
	unlock_buffer(dbh);
	mark_buffer_dirty_inode(dbh, inode);

	/* Source iterator walks the in-ICB data; dest fills the new block. */
	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
	sbh = sfibh.sbh = sfibh.ebh = NULL;
	dfibh.soffset = dfibh.eoffset = 0;
	dfibh.sbh = dfibh.ebh = dbh;
	while ( (f_pos < size) )
	{
		/* Read with the old (in-ICB) type, write with the new one. */
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
		if (!sfi)
		{
			udf_release_data(dbh);
			return NULL;
		}
		UDF_I_ALLOCTYPE(inode) = alloctype;
		sfi->descTag.tagLocation = cpu_to_le32(*block);
		dfibh.soffset = dfibh.eoffset;
		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
			sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
		{
			/* Restore the old type so the directory stays readable. */
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
			udf_release_data(dbh);
			return NULL;
		}
	}
	mark_buffer_dirty_inode(dbh, inode);

	/* Clear the in-ICB area and record the new block as one extent. */
	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
	UDF_I_LENALLOC(inode) = 0;
	bloc = UDF_I_LOCATION(inode);
	eloc.logicalBlockNum = *block;
	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
	elen = inode->i_size;
	UDF_I_LENEXTENTS(inode) = elen;
	extoffset = udf_file_entry_alloc_offset(inode);
	udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
	/* UniqueID stuff */

	udf_release_data(sbh);
	mark_inode_dirty(inode);
	return dbh;
}
/*
 * udf_get_block
 *
 * get_block callback for the UDF address-space operations: map file
 * logical @block into @bh_result.  Without @create it is a pure lookup
 * via udf_block_map(); with @create it may allocate, serialized under
 * the BKL.  Returns 0 on success or a negative errno.
 */
static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	int err, new;
	struct buffer_head *bh;
	unsigned long phys;

	if (!create)
	{
		phys = udf_block_map(inode, block);
		if (phys)
			map_bh(bh_result, inode->i_sb, phys);
		return 0;
	}

	err = -EIO;
	new = 0;
	bh = NULL;

	lock_kernel();

	if (block < 0)
		goto abort_negative;

	/* Sequential write: advance the next-allocation hint with us. */
	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
	{
		UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
		UDF_I_NEXT_ALLOC_GOAL(inode) ++;
	}

	err = 0;

	/* inode_getblk() returns the mapping through phys/new/err and is
	   expected to return NULL; a non-NULL bh or phys==0 here would
	   violate its contract, hence the BUG()s. */
	bh = inode_getblk(inode, block, &err, &phys, &new);
	if (bh)
		BUG();
	if (err)
		goto abort;
	if (!phys)
		BUG();

	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, phys);
abort:
	unlock_kernel();
	return err;

abort_negative:
	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
	goto abort;
}
337 static struct buffer_head *
338 udf_getblk(struct inode *inode, long block, int create, int *err)
340 struct buffer_head dummy;
342 dummy.b_state = 0;
343 dummy.b_blocknr = -1000;
344 *err = udf_get_block(inode, block, &dummy, create);
345 if (!*err && buffer_mapped(&dummy))
347 struct buffer_head *bh;
348 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
349 if (buffer_new(&dummy))
351 lock_buffer(bh);
352 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
353 set_buffer_uptodate(bh);
354 unlock_buffer(bh);
355 mark_buffer_dirty_inode(bh, inode);
357 return bh;
359 return NULL;
/*
 * inode_getblk
 *
 * Map logical @block of @inode to a physical block, allocating one when
 * the block falls in an unrecorded extent or past EOF.  On success
 * *phys holds the physical block and *new is set if it was freshly
 * allocated; on failure *err holds a negative errno.  Always returns
 * NULL — udf_get_block() BUG()s on a non-NULL return.  Caller holds the
 * BKL (taken in udf_get_block()).
 */
static struct buffer_head * inode_getblk(struct inode * inode, long block,
	int *err, long *phys, int *new)
{
	struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
	kernel_long_ad laarr[EXTENT_MERGE_SIZE];
	uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
	int count = 0, startnum = 0, endnum = 0;
	uint32_t elen = 0;
	kernel_lb_addr eloc, pbloc, cbloc, nbloc;
	int c = 1;
	uint64_t lbcount = 0, b_off = 0;
	uint32_t newblocknum, newblock, offset = 0;
	int8_t etype;
	int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
	char lastblock = 0;

	pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
	b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
	pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);

	/* find the extent which contains the block we are looking for.
	   alternate between laarr[0] and laarr[1] for locations of the
	   current extent, and the previous extent */
	do
	{
		/* Shift the previous/current/next bh and location triples
		   one step forward, keeping refcounts balanced. */
		if (pbh != cbh)
		{
			udf_release_data(pbh);
			atomic_inc(&cbh->b_count);
			pbh = cbh;
		}
		if (cbh != nbh)
		{
			udf_release_data(cbh);
			atomic_inc(&nbh->b_count);
			cbh = nbh;
		}

		lbcount += elen;

		pbloc = cbloc;
		cbloc = nbloc;

		pextoffset = cextoffset;
		cextoffset = nextoffset;

		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
			break;

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		/* Track the end of the last materialized extent as an
		   allocation goal for new blocks. */
		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				inode->i_sb->s_blocksize_bits);

		count ++;
	} while (lbcount + elen <= b_off);

	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;

	/* if the extent is allocated and recorded, return the block
	   if the extent is not a multiple of the blocksize, round up */

	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
	{
		if (elen & (inode->i_sb->s_blocksize - 1))
		{
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				~(inode->i_sb->s_blocksize - 1));
			etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
		}
		udf_release_data(pbh);
		udf_release_data(cbh);
		udf_release_data(nbh);
		newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
		*phys = newblock;
		return NULL;
	}

	if (etype == -1)
	{
		/* Ran off the end of the extent list: we are extending the
		   file.  Round the last extent up to a block boundary and
		   append an unrecorded extent covering the new block. */
		endnum = startnum = ((count > 1) ? 1 : count);
		if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
		{
			laarr[c].extLength =
				(laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
				(((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) &
				~(inode->i_sb->s_blocksize - 1));
			UDF_I_LENEXTENTS(inode) =
				(UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
				~(inode->i_sb->s_blocksize - 1);
		}
		c = !c;
		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			((offset + 1) << inode->i_sb->s_blocksize_bits);
		memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
		count ++;
		endnum ++;
		lastblock = 1;
	}
	else
		endnum = startnum = ((count > 2) ? 2 : count);

	/* if the current extent is in position 0, swap it with the previous */
	if (!c && count != 1)
	{
		laarr[2] = laarr[0];
		laarr[0] = laarr[1];
		laarr[1] = laarr[2];
		c = 1;
	}

	/* if the current block is located in a extent, read the next extent */
	if (etype != -1)
	{
		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
		{
			laarr[c+1].extLength = (etype << 30) | elen;
			laarr[c+1].extLocation = eloc;
			count ++;
			startnum ++;
			endnum ++;
		}
		else
			lastblock = 1;
	}
	udf_release_data(cbh);
	udf_release_data(nbh);

	/* if the current extent is not recorded but allocated, get the
	   block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	else /* otherwise, allocate a new block */
	{
		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
			goal = UDF_I_NEXT_ALLOC_GOAL(inode);

		if (!goal)
		{
			if (!(goal = pgoal))
				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
		}

		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
		{
			udf_release_data(pbh);
			*err = -ENOSPC;
			return NULL;
		}
		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
	}

	/* if the extent the requsted block is located in contains multiple blocks,
	   split the extent into at most three extents. blocks prior to requested
	   block, requested block, and blocks after requested block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

#ifdef UDF_PREALLOCATE
	/* preallocate blocks */
	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
#endif

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	   of extents is greater than the old number, and deleting extents if
	   the new number of extents is less than the old number */
	udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);

	udf_release_data(pbh);

	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
	{
		return NULL;
	}
	*phys = newblock;
	*err = 0;
	*new = 1;
	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
	inode->i_ctime = current_fs_time(inode->i_sb);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	/* result is always NULL here, matching the contract above. */
	return result;
}
/*
 * udf_split_extents
 *
 * Split the unrecorded extent laarr[*c] around block @offset within it
 * into up to three pieces: blocks before the target, the target block
 * itself (now recorded+allocated at @newblocknum), and blocks after.
 * *c is advanced to the target's new slot; *endnum grows by the number
 * of extra extents created.
 */
static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
	{
		int curr = *c;
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		/* Make room: single-block extent needs no shifting; splitting
		   at either end needs one extra slot, in the middle two. */
		if (blen == 1)
			;
		else if (!offset || blen == offset + 1)
		{
			laarr[curr+2] = laarr[curr+1];
			laarr[curr+1] = laarr[curr];
		}
		else
		{
			laarr[curr+3] = laarr[curr+1];
			laarr[curr+2] = laarr[curr+1] = laarr[curr];
		}

		if (offset)
		{
			/* Leading piece: blocks before the requested block. */
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
			{
				/* Give back allocated-but-unrecorded blocks we
				   are cutting off the front. */
				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << inode->i_sb->s_blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.partitionReferenceNum = 0;
			}
			else
				laarr[curr].extLength = (etype << 30) |
					(offset << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*c) ++;
			(*endnum) ++;
		}

		/* The requested block becomes a recorded one-block extent. */
		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I_LOCATION(inode).partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			inode->i_sb->s_blocksize;
		curr ++;

		if (blen != offset + 1)
		{
			/* Trailing piece: blocks after the requested block. */
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*endnum) ++;
		}
	}
}
/*
 * udf_prealloc_extents
 *
 * Preallocate up to UDF_DEFAULT_PREALLOC_BLOCKS blocks directly after
 * the just-written block @c, converting following not-recorded extents
 * in laarr into (or extending) a not-recorded-but-allocated extent.
 * Compiled in only under UDF_PREALLOCATE (see inode_getblk()).
 */
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c+1))
	{
		if (!lastblock)
			return;
		else
			start = c;
	}
	else
	{
		/* An extent follows @c; extend it if it is already
		   allocated-but-unrecorded, otherwise start at @c. */
		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		{
			start = c+1;
			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		}
		else
			start = c;
	}

	/* Count how many following blocks could use the preallocation. */
	for (i=start+1; i<=*endnum; i++)
	{
		if (i == *endnum)
		{
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		}
		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		else
			break;
	}

	if (length)
	{
		/* Ask for contiguous blocks right after laarr[start]. */
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
			laarr[start].extLocation.partitionReferenceNum,
			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);

		if (numalloc)
		{
			if (start == (c+1))
				laarr[start].extLength +=
					(numalloc << inode->i_sb->s_blocksize_bits);
			else
			{
				/* Insert a fresh allocated-unrecorded extent
				   right after the written block. */
				memmove(&laarr[c+2], &laarr[c+1],
					sizeof(long_ad) * (*endnum - (c+1)));
				(*endnum) ++;
				laarr[c+1].extLocation.logicalBlockNum = next;
				laarr[c+1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.partitionReferenceNum;
				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
					(numalloc << inode->i_sb->s_blocksize_bits);
				start = c+1;
			}

			/* Shrink or swallow the following unrecorded extents
			   that the preallocation now covers. */
			for (i=start+1; numalloc && i<*endnum; i++)
			{
				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;

				if (elen > numalloc)
				{
					laarr[i].extLength -=
						(numalloc << inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				}
				else
				{
					numalloc -= elen;
					if (*endnum > (i+1))
						memmove(&laarr[i], &laarr[i+1],
							sizeof(long_ad) * (*endnum - (i+1)));
					i --;
					(*endnum) --;
				}
			}
			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}
/*
 * udf_merge_extents
 *
 * Coalesce adjacent laarr entries: merge same-type extents that are
 * logically contiguous (splitting again if the combined length would
 * overflow the on-disk extent length field), and downgrade
 * allocated-but-unrecorded extents that precede unrecorded ones (or end
 * the list) by freeing their blocks.  *endnum shrinks as entries merge.
 */
static void udf_merge_extents(struct inode *inode,
	 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int i;

	for (i=0; i<(*endnum-1); i++)
	{
		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
		{
			/* Same type: mergeable if unrecorded-unallocated, or if
			   physically contiguous on disk. */
			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
			{
				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
				{
					/* Combined length overflows the length
					   field: fill extent i to the max and
					   push the remainder into i+1. */
					laarr[i+1].extLength = (laarr[i+1].extLength -
						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
					laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
					laarr[i+1].extLocation.logicalBlockNum =
						laarr[i].extLocation.logicalBlockNum +
						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
							inode->i_sb->s_blocksize_bits);
				}
				else
				{
					/* Fits: absorb i+1 into i and close the gap. */
					laarr[i].extLength = laarr[i+1].extLength +
						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
					if (*endnum > (i+2))
						memmove(&laarr[i+1], &laarr[i+2],
							sizeof(long_ad) * (*endnum - (i+2)));
					i --;
					(*endnum) --;
				}
			}
		}
		else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
		{
			/* Allocated-unrecorded followed by unallocated: free the
			   blocks and merge as unallocated. */
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;

			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
			{
				laarr[i+1].extLength = (laarr[i+1].extLength -
					(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
				laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
					(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
			}
			else
			{
				laarr[i].extLength = laarr[i+1].extLength +
					(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
				if (*endnum > (i+2))
					memmove(&laarr[i+1], &laarr[i+2],
						sizeof(long_ad) * (*endnum - (i+2)));
				i --;
				(*endnum) --;
			}
		}
		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		{
			/* Trailing allocated-unrecorded extent: release the
			   blocks and mark it unallocated. */
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;
			laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
				EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}
/*
 * udf_update_extents
 *
 * Write the merged laarr extents back to the inode's allocation
 * descriptors starting at (pbloc, pextoffset): delete surplus on-disk
 * extents when the list shrank (startnum > endnum), insert new ones
 * when it grew, then overwrite the rest in place.
 */
static void udf_update_extents(struct inode *inode,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
	kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
{
	int start = 0, i;
	kernel_lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum)
	{
		/* Fewer extents than before: drop the difference. */
		for (i=0; i<(startnum-endnum); i++)
		{
			udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
				laarr[i].extLength, *pbh);
		}
	}
	else if (startnum < endnum)
	{
		/* More extents than before: insert the extras, stepping the
		   descriptor cursor past each inserted entry. */
		for (i=0; i<(endnum-startnum); i++)
		{
			udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
				laarr[i].extLength, *pbh);
			udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
				&laarr[i].extLength, pbh, 1);
			start ++;
		}
	}

	/* Overwrite the remaining descriptors in place. */
	for (i=start; i<endnum; i++)
	{
		udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
		udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
			laarr[i].extLength, *pbh, 1);
	}
}
837 struct buffer_head * udf_bread(struct inode * inode, int block,
838 int create, int * err)
840 struct buffer_head * bh = NULL;
842 bh = udf_getblk(inode, block, create, err);
843 if (!bh)
844 return NULL;
846 if (buffer_uptodate(bh))
847 return bh;
848 ll_rw_block(READ, 1, &bh);
849 wait_on_buffer(bh);
850 if (buffer_uptodate(bh))
851 return bh;
852 brelse(bh);
853 *err = -EIO;
854 return NULL;
/*
 * udf_truncate
 *
 * Truncate @inode to i_size.  In-ICB files either stay in the ICB (tail
 * is just zeroed) or must first be expanded to extent form when i_size
 * no longer fits in the ICB; extent-based files go through the normal
 * partial-page zeroing plus extent truncation.
 */
void udf_truncate(struct inode * inode)
{
	int offset;
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
			S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	lock_kernel();
	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
	{
		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
			inode->i_size))
		{
			/* New size no longer fits in the ICB: convert first. */
			udf_expand_file_adinicb(inode, inode->i_size, &err);
			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
			{
				/* Expansion failed; clamp to what is stored. */
				inode->i_size = UDF_I_LENALLOC(inode);
				unlock_kernel();
				return;
			}
			else
				udf_truncate_extents(inode);
		}
		else
		{
			/* Still in-ICB: zero the tail of the data area. */
			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
			UDF_I_LENALLOC(inode) = inode->i_size;
		}
	}
	else
	{
		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
		udf_truncate_extents(inode);
	}

	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
	if (IS_SYNC(inode))
		udf_sync_inode (inode);
	else
		mark_inode_dirty(inode);
	unlock_kernel();
}
/*
 * __udf_read_inode
 *
 * Read the file entry (FE/EFE/USE) for @inode from disk and hand it to
 * udf_fill_inode().  Strategy-4096 inodes are followed through their
 * indirect entry (recursing with the new location); any read or tag
 * failure marks the inode bad.
 */
static void
__udf_read_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	uint16_t ident;

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_no = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
	inode->i_blksize = PAGE_SIZE;

	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);

	if (!bh)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
			inode->i_ino);
		make_bad_inode(inode);
		return;
	}

	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
		ident != TAG_IDENT_USE)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
			inode->i_ino, ident);
		udf_release_data(bh);
		make_bad_inode(inode);
		return;
	}

	fe = (struct fileEntry *)bh->b_data;

	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
	{
		/* Strategy 4096: the real FE may live behind an indirect
		   entry stored in the next block of the ICB. */
		struct buffer_head *ibh = NULL, *nbh = NULL;
		struct indirectEntry *ie;

		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
		if (ident == TAG_IDENT_IE)
		{
			if (ibh)
			{
				kernel_lb_addr loc;
				ie = (struct indirectEntry *)ibh->b_data;

				loc = lelb_to_cpu(ie->indirectICB.extLocation);

				if (ie->indirectICB.extLength &&
					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
				{
					if (ident == TAG_IDENT_FE ||
						ident == TAG_IDENT_EFE)
					{
						/* Follow the indirection and
						   re-read from there. */
						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
						udf_release_data(bh);
						udf_release_data(ibh);
						udf_release_data(nbh);
						__udf_read_inode(inode);
						return;
					}
					else
					{
						udf_release_data(nbh);
						udf_release_data(ibh);
					}
				}
				else
					udf_release_data(ibh);
			}
		}
		else
			udf_release_data(ibh);
	}
	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
	{
		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
			le16_to_cpu(fe->icbTag.strategyType));
		udf_release_data(bh);
		make_bad_inode(inode);
		return;
	}
	udf_fill_inode(inode, bh);
	udf_release_data(bh);
}
1001 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1003 struct fileEntry *fe;
1004 struct extendedFileEntry *efe;
1005 time_t convtime;
1006 long convtime_usec;
1007 int offset;
1009 fe = (struct fileEntry *)bh->b_data;
1010 efe = (struct extendedFileEntry *)bh->b_data;
1012 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1013 UDF_I_STRAT4096(inode) = 0;
1014 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1015 UDF_I_STRAT4096(inode) = 1;
1017 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1018 UDF_I_UNIQUE(inode) = 0;
1019 UDF_I_LENEATTR(inode) = 0;
1020 UDF_I_LENEXTENTS(inode) = 0;
1021 UDF_I_LENALLOC(inode) = 0;
1022 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1023 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1024 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1026 UDF_I_EFE(inode) = 1;
1027 UDF_I_USE(inode) = 0;
1028 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1029 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1031 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1033 UDF_I_EFE(inode) = 0;
1034 UDF_I_USE(inode) = 0;
1035 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1036 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1038 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1040 UDF_I_EFE(inode) = 0;
1041 UDF_I_USE(inode) = 1;
1042 UDF_I_LENALLOC(inode) =
1043 le32_to_cpu(
1044 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1045 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1046 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1047 return;
1050 inode->i_uid = le32_to_cpu(fe->uid);
1051 if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1053 inode->i_gid = le32_to_cpu(fe->gid);
1054 if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1056 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1057 if (!inode->i_nlink)
1058 inode->i_nlink = 1;
1060 inode->i_size = le64_to_cpu(fe->informationLength);
1061 UDF_I_LENEXTENTS(inode) = inode->i_size;
1063 inode->i_mode = udf_convert_permissions(fe);
1064 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1066 if (UDF_I_EFE(inode) == 0)
1068 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1069 (inode->i_sb->s_blocksize_bits - 9);
1071 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1072 lets_to_cpu(fe->accessTime)) )
1074 inode->i_atime.tv_sec = convtime;
1075 inode->i_atime.tv_nsec = convtime_usec * 1000;
1077 else
1079 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1082 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1083 lets_to_cpu(fe->modificationTime)) )
1085 inode->i_mtime.tv_sec = convtime;
1086 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1088 else
1090 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1093 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1094 lets_to_cpu(fe->attrTime)) )
1096 inode->i_ctime.tv_sec = convtime;
1097 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1099 else
1101 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1104 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1105 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1106 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1107 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1109 else
1111 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1112 (inode->i_sb->s_blocksize_bits - 9);
1114 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1115 lets_to_cpu(efe->accessTime)) )
1117 inode->i_atime.tv_sec = convtime;
1118 inode->i_atime.tv_nsec = convtime_usec * 1000;
1120 else
1122 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1125 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1126 lets_to_cpu(efe->modificationTime)) )
1128 inode->i_mtime.tv_sec = convtime;
1129 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1131 else
1133 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1136 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1137 lets_to_cpu(efe->createTime)) )
1139 UDF_I_CRTIME(inode).tv_sec = convtime;
1140 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1142 else
1144 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1147 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1148 lets_to_cpu(efe->attrTime)) )
1150 inode->i_ctime.tv_sec = convtime;
1151 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1153 else
1155 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1158 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1159 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1160 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1161 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1164 switch (fe->icbTag.fileType)
1166 case ICBTAG_FILE_TYPE_DIRECTORY:
1168 inode->i_op = &udf_dir_inode_operations;
1169 inode->i_fop = &udf_dir_operations;
1170 inode->i_mode |= S_IFDIR;
1171 inode->i_nlink ++;
1172 break;
1174 case ICBTAG_FILE_TYPE_REALTIME:
1175 case ICBTAG_FILE_TYPE_REGULAR:
1176 case ICBTAG_FILE_TYPE_UNDEF:
1178 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1179 inode->i_data.a_ops = &udf_adinicb_aops;
1180 else
1181 inode->i_data.a_ops = &udf_aops;
1182 inode->i_op = &udf_file_inode_operations;
1183 inode->i_fop = &udf_file_operations;
1184 inode->i_mode |= S_IFREG;
1185 break;
1187 case ICBTAG_FILE_TYPE_BLOCK:
1189 inode->i_mode |= S_IFBLK;
1190 break;
1192 case ICBTAG_FILE_TYPE_CHAR:
1194 inode->i_mode |= S_IFCHR;
1195 break;
1197 case ICBTAG_FILE_TYPE_FIFO:
1199 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1200 break;
1202 case ICBTAG_FILE_TYPE_SOCKET:
1204 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1205 break;
1207 case ICBTAG_FILE_TYPE_SYMLINK:
1209 inode->i_data.a_ops = &udf_symlink_aops;
1210 inode->i_op = &page_symlink_inode_operations;
1211 inode->i_mode = S_IFLNK|S_IRWXUGO;
1212 break;
1214 default:
1216 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1217 inode->i_ino, fe->icbTag.fileType);
1218 make_bad_inode(inode);
1219 return;
1222 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1224 struct deviceSpec *dsea =
1225 (struct deviceSpec *)
1226 udf_get_extendedattr(inode, 12, 1);
1228 if (dsea)
1230 init_special_inode(inode, inode->i_mode, MKDEV(
1231 le32_to_cpu(dsea->majorDeviceIdent),
1232 le32_to_cpu(dsea->minorDeviceIdent)));
1233 /* Developer ID ??? */
1235 else
1237 make_bad_inode(inode);
1242 static mode_t
1243 udf_convert_permissions(struct fileEntry *fe)
1245 mode_t mode;
1246 uint32_t permissions;
1247 uint32_t flags;
1249 permissions = le32_to_cpu(fe->permissions);
1250 flags = le16_to_cpu(fe->icbTag.flags);
1252 mode = (( permissions ) & S_IRWXO) |
1253 (( permissions >> 2 ) & S_IRWXG) |
1254 (( permissions >> 4 ) & S_IRWXU) |
1255 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1256 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1257 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1259 return mode;
/*
 * udf_write_inode
 *
 * PURPOSE
 *	Write out the specified inode.
 *
 * DESCRIPTION
 *	Called by the VFS whenever the inode needs to be synced to disk;
 *	serializes with the big kernel lock and delegates the actual
 *	descriptor rebuild to udf_update_inode().
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 */
/*
 * VFS write_inode entry point: flush the in-core inode to its on-disk
 * (extended) file entry.  Takes the big kernel lock around the update.
 * Returns 0 on success or a negative errno from udf_update_inode().
 */
int udf_write_inode(struct inode * inode, int sync)
{
	int err;

	lock_kernel();
	err = udf_update_inode(inode, sync);
	unlock_kernel();

	return err;
}
/*
 * Synchronously write an inode's on-disk entry (udf_update_inode with
 * do_sync == 1).  Unlike udf_write_inode() this does not take the BKL;
 * presumably callers already hold it — TODO confirm against call sites.
 */
int udf_sync_inode(struct inode * inode)
{
	return udf_update_inode(inode, 1);
}
1291 static int
1292 udf_update_inode(struct inode *inode, int do_sync)
1294 struct buffer_head *bh = NULL;
1295 struct fileEntry *fe;
1296 struct extendedFileEntry *efe;
1297 uint32_t udfperms;
1298 uint16_t icbflags;
1299 uint16_t crclen;
1300 int i;
1301 kernel_timestamp cpu_time;
1302 int err = 0;
1304 bh = udf_tread(inode->i_sb,
1305 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1307 if (!bh)
1309 udf_debug("bread failure\n");
1310 return -EIO;
1313 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1315 fe = (struct fileEntry *)bh->b_data;
1316 efe = (struct extendedFileEntry *)bh->b_data;
1318 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1320 struct unallocSpaceEntry *use =
1321 (struct unallocSpaceEntry *)bh->b_data;
1323 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1324 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1325 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1326 sizeof(tag);
1327 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1328 use->descTag.descCRCLength = cpu_to_le16(crclen);
1329 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1331 use->descTag.tagChecksum = 0;
1332 for (i=0; i<16; i++)
1333 if (i != 4)
1334 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1336 mark_buffer_dirty(bh);
1337 udf_release_data(bh);
1338 return err;
1341 if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
1342 fe->uid = cpu_to_le32(inode->i_uid);
1344 if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
1345 fe->gid = cpu_to_le32(inode->i_gid);
1347 udfperms = ((inode->i_mode & S_IRWXO) ) |
1348 ((inode->i_mode & S_IRWXG) << 2) |
1349 ((inode->i_mode & S_IRWXU) << 4);
1351 udfperms |= (le32_to_cpu(fe->permissions) &
1352 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1353 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1354 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1355 fe->permissions = cpu_to_le32(udfperms);
1357 if (S_ISDIR(inode->i_mode))
1358 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1359 else
1360 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1362 fe->informationLength = cpu_to_le64(inode->i_size);
1364 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1366 regid *eid;
1367 struct deviceSpec *dsea =
1368 (struct deviceSpec *)
1369 udf_get_extendedattr(inode, 12, 1);
1371 if (!dsea)
1373 dsea = (struct deviceSpec *)
1374 udf_add_extendedattr(inode,
1375 sizeof(struct deviceSpec) +
1376 sizeof(regid), 12, 0x3);
1377 dsea->attrType = cpu_to_le32(12);
1378 dsea->attrSubtype = 1;
1379 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1380 sizeof(regid));
1381 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1383 eid = (regid *)dsea->impUse;
1384 memset(eid, 0, sizeof(regid));
1385 strcpy(eid->ident, UDF_ID_DEVELOPER);
1386 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1387 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1388 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1389 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1392 if (UDF_I_EFE(inode) == 0)
1394 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1395 fe->logicalBlocksRecorded = cpu_to_le64(
1396 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1397 (inode->i_sb->s_blocksize_bits - 9));
1399 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1400 fe->accessTime = cpu_to_lets(cpu_time);
1401 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1402 fe->modificationTime = cpu_to_lets(cpu_time);
1403 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1404 fe->attrTime = cpu_to_lets(cpu_time);
1405 memset(&(fe->impIdent), 0, sizeof(regid));
1406 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1407 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1408 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1409 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1410 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1411 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1412 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1413 crclen = sizeof(struct fileEntry);
1415 else
1417 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1418 efe->objectSize = cpu_to_le64(inode->i_size);
1419 efe->logicalBlocksRecorded = cpu_to_le64(
1420 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1421 (inode->i_sb->s_blocksize_bits - 9));
1423 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1424 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1425 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1427 UDF_I_CRTIME(inode) = inode->i_atime;
1429 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1430 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1431 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1433 UDF_I_CRTIME(inode) = inode->i_mtime;
1435 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1436 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1437 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1439 UDF_I_CRTIME(inode) = inode->i_ctime;
1442 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1443 efe->accessTime = cpu_to_lets(cpu_time);
1444 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1445 efe->modificationTime = cpu_to_lets(cpu_time);
1446 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1447 efe->createTime = cpu_to_lets(cpu_time);
1448 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1449 efe->attrTime = cpu_to_lets(cpu_time);
1451 memset(&(efe->impIdent), 0, sizeof(regid));
1452 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1453 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1454 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1455 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1456 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1457 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1458 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1459 crclen = sizeof(struct extendedFileEntry);
1461 if (UDF_I_STRAT4096(inode))
1463 fe->icbTag.strategyType = cpu_to_le16(4096);
1464 fe->icbTag.strategyParameter = cpu_to_le16(1);
1465 fe->icbTag.numEntries = cpu_to_le16(2);
1467 else
1469 fe->icbTag.strategyType = cpu_to_le16(4);
1470 fe->icbTag.numEntries = cpu_to_le16(1);
1473 if (S_ISDIR(inode->i_mode))
1474 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1475 else if (S_ISREG(inode->i_mode))
1476 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1477 else if (S_ISLNK(inode->i_mode))
1478 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1479 else if (S_ISBLK(inode->i_mode))
1480 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1481 else if (S_ISCHR(inode->i_mode))
1482 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1483 else if (S_ISFIFO(inode->i_mode))
1484 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1485 else if (S_ISSOCK(inode->i_mode))
1486 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1488 icbflags = UDF_I_ALLOCTYPE(inode) |
1489 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1490 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1491 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1492 (le16_to_cpu(fe->icbTag.flags) &
1493 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1494 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1496 fe->icbTag.flags = cpu_to_le16(icbflags);
1497 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1498 fe->descTag.descVersion = cpu_to_le16(3);
1499 else
1500 fe->descTag.descVersion = cpu_to_le16(2);
1501 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1502 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1503 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1504 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1505 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1507 fe->descTag.tagChecksum = 0;
1508 for (i=0; i<16; i++)
1509 if (i != 4)
1510 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1512 /* write the data blocks */
1513 mark_buffer_dirty(bh);
1514 if (do_sync)
1516 sync_dirty_buffer(bh);
1517 if (buffer_req(bh) && !buffer_uptodate(bh))
1519 printk("IO error syncing udf inode [%s:%08lx]\n",
1520 inode->i_sb->s_id, inode->i_ino);
1521 err = -EIO;
1524 udf_release_data(bh);
1525 return err;
1528 struct inode *
1529 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1531 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1532 struct inode *inode = iget_locked(sb, block);
1534 if (!inode)
1535 return NULL;
1537 if (inode->i_state & I_NEW) {
1538 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1539 __udf_read_inode(inode);
1540 unlock_new_inode(inode);
1543 if (is_bad_inode(inode))
1544 goto out_iput;
1546 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1547 udf_debug("block=%d, partition=%d out of range\n",
1548 ino.logicalBlockNum, ino.partitionReferenceNum);
1549 make_bad_inode(inode);
1550 goto out_iput;
1553 return inode;
1555 out_iput:
1556 iput(inode);
1557 return NULL;
/*
 * Append one extent (eloc/elen) to the inode's allocation descriptor
 * list at *bloc/*extoffset.  If the current descriptor block cannot
 * hold two more descriptors, a new Allocation Extent Descriptor block
 * is allocated and chained in first.
 *
 * On return *bloc, *extoffset and *bh track the (possibly new) current
 * descriptor block.  Returns the extent type (elen >> 30) or -1 on
 * failure.
 */
int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
{
	int adsize;
	short_ad *sad = NULL;
	long_ad *lad = NULL;
	struct allocExtDesc *aed;
	int8_t etype;
	uint8_t *ptr;

	/* Descriptors live either inline in the (E)FE or in an AED block. */
	if (!*bh)
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
		ptr = (*bh)->b_data + *extoffset;

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return -1;

	/* Need room for the new descriptor AND a chaining descriptor;
	 * otherwise allocate and switch to a fresh AED block. */
	if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
	{
		char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		kernel_lb_addr obloc = *bloc;

		if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
		{
			return -1;
		}
		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
			*bloc, 0))))
		{
			return -1;
		}
		lock_buffer(nbh);
		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(nbh);
		unlock_buffer(nbh);
		mark_buffer_dirty_inode(nbh, inode);

		aed = (struct allocExtDesc *)(nbh->b_data);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
		if (*extoffset + adsize > inode->i_sb->s_blocksize)
		{
			/* Not even one descriptor fits: move the last one
			 * over into the new block. */
			loffset = *extoffset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = ptr - adsize;
			dptr = nbh->b_data + sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			*extoffset = sizeof(struct allocExtDesc) + adsize;
		}
		else
		{
			/* One slot left in the old block: the chaining
			 * descriptor will be written there (at sptr). */
			loffset = *extoffset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = ptr;
			*extoffset = sizeof(struct allocExtDesc);

			if (*bh)
			{
				aed = (struct allocExtDesc *)(*bh)->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
			}
			else
			{
				UDF_I_LENALLOC(inode) += adsize;
				mark_inode_dirty(inode);
			}
		}
		/* UDF >= 2.00 uses descriptor tag version 3. */
		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
				bloc->logicalBlockNum, sizeof(tag));
		else
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
				bloc->logicalBlockNum, sizeof(tag));

		/* Write the "next extent of allocation descriptors"
		 * chaining descriptor at sptr, pointing at the new block. */
		switch (UDF_I_ALLOCTYPE(inode))
		{
			case ICBTAG_FLAG_AD_SHORT:
			{
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
				break;
			}
			case ICBTAG_FLAG_AD_LONG:
			{
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(*bloc);
				memset(lad->impUse, 0x00, sizeof(lad->impUse));
				break;
			}
		}
		if (*bh)
		{
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((*bh)->b_data, loffset);
			else
				udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(*bh, inode);
			udf_release_data(*bh);
		}
		else
			mark_inode_dirty(inode);
		/* Continue in the newly allocated AED block. */
		*bh = nbh;
	}

	/* Now there is guaranteed room: write the new extent descriptor. */
	etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);

	/* Account for the added descriptor, inline or in the AED block. */
	if (!*bh)
	{
		UDF_I_LENALLOC(inode) += adsize;
		mark_inode_dirty(inode);
	}
	else
	{
		aed = (struct allocExtDesc *)(*bh)->b_data;
		aed->lengthAllocDescs =
			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
			udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
		else
			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(*bh, inode);
	}

	return etype;
}
/*
 * Overwrite the allocation descriptor at *extoffset with eloc/elen,
 * either inline in the (E)FE (bh == NULL) or in the AED block bh.
 * Advances *extoffset past the descriptor when inc is set.
 *
 * Note: when bh is non-NULL its refcount is bumped on entry and
 * released (udf_release_data) before returning, so the caller's
 * reference is preserved.
 *
 * Returns the extent type (elen >> 30), or -1 for an unknown
 * allocation type.
 */
int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
{
	int adsize;
	uint8_t *ptr;

	if (!bh)
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
	{
		ptr = bh->b_data + *extoffset;
		atomic_inc(&bh->b_count);
	}

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad = (short_ad *)ptr;
			sad->extLength = cpu_to_le32(elen);
			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
			adsize = sizeof(short_ad);
			break;
		}
		case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad = (long_ad *)ptr;
			lad->extLength = cpu_to_le32(elen);
			lad->extLocation = cpu_to_lelb(eloc);
			memset(lad->impUse, 0x00, sizeof(lad->impUse));
			adsize = sizeof(long_ad);
			break;
		}
		default:
			return -1;
	}

	if (bh)
	{
		/* Strict pre-2.01 media keep a stale tag; otherwise
		 * refresh the AED's CRC/checksum after the in-place edit. */
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
		{
			struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
			udf_update_tag((bh)->b_data,
				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(bh, inode);
		udf_release_data(bh);
	}
	else
		mark_inode_dirty(inode);

	if (inc)
		*extoffset += adsize;
	return (elen >> 30);
}
/*
 * Fetch the next allocation descriptor, transparently following
 * "next extent of allocation descriptors" chaining entries into
 * further AED blocks (swapping *bh/*bloc as it goes).
 *
 * Returns the extent type, or -1 on end of list / read failure.
 */
int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
{
	int8_t etype;

	while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
	{
		/* Chaining descriptor: hop to the next AED block. */
		*bloc = *eloc;
		*extoffset = sizeof(struct allocExtDesc);
		udf_release_data(*bh);
		if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
		{
			udf_debug("reading block %d failed!\n",
				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
			return -1;
		}
	}

	return etype;
}
/*
 * Decode the allocation descriptor at *extoffset (inline in the (E)FE
 * when *bh is NULL, else inside the AED block *bh) into *eloc/*elen.
 * A zero *extoffset is initialized to the first descriptor's offset.
 *
 * Returns the 2-bit extent type from the descriptor's length word,
 * or -1 past the end of the descriptor area / on unsupported
 * allocation type.
 */
int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;

	if (!*bh)
	{
		if (!(*extoffset))
			*extoffset = udf_file_entry_alloc_offset(inode);
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
		/* Descriptor area ends after lengthAllocDescs bytes. */
		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
	}
	else
	{
		if (!(*extoffset))
			*extoffset = sizeof(struct allocExtDesc);
		ptr = (*bh)->b_data + *extoffset;
		alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
	}

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad;

			if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(sad->extLength) >> 30;
			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
			/* short_ads are partition-relative: inherit the
			 * inode's partition reference. */
			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad;

			if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(lad->extLength) >> 30;
			*eloc = lelb_to_cpu(lad->extLocation);
			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		default:
		{
			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
			return -1;
		}
	}

	return etype;
}
/*
 * Insert extent neloc/nelen at position bloc/extoffset, shifting every
 * following descriptor down by one slot (each loop iteration writes the
 * pending extent and picks up the one it displaced); the last displaced
 * extent is appended via udf_add_aext().
 *
 * Returns the type of the last written extent.
 */
static int8_t
udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
	kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
{
	kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;

	/* Extra reference: the aext helpers release bh internally. */
	if (bh)
		atomic_inc(&bh->b_count);

	while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
	{
		udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);

		/* The displaced extent becomes the next one to insert. */
		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}
	udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
	udf_release_data(bh);
	return (nelen >> 30);
}
/*
 * Delete the extent at nbloc/nextoffset by sliding every subsequent
 * descriptor up one slot (two cursors: 'o' = write position trailing
 * 'n' = read position by one descriptor), then shrinking the
 * descriptor count.  If the walk spilled into a different AED block,
 * that now-unneeded block is freed and two terminating blank
 * descriptors are written; otherwise one.
 *
 * Returns -1 if the extent to delete cannot be read.
 */
int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
{
	struct buffer_head *obh;
	kernel_lb_addr obloc;
	int oextoffset, adsize;
	int8_t etype;
	struct allocExtDesc *aed;

	/* Two extra references: one for the 'o' cursor alias, one because
	 * the aext helpers consume a reference per call. */
	if (nbh)
	{
		atomic_inc(&nbh->b_count);
		atomic_inc(&nbh->b_count);
	}

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		adsize = 0;

	obh = nbh;
	obloc = nbloc;
	oextoffset = nextoffset;

	/* Step the read cursor past the extent being deleted. */
	if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
		return -1;

	while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
	{
		udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
		if (obh != nbh)
		{
			/* Read cursor crossed into a new AED block: drag the
			 * write cursor along, one descriptor behind. */
			obloc = nbloc;
			udf_release_data(obh);
			atomic_inc(&nbh->b_count);
			obh = nbh;
			oextoffset = nextoffset - adsize;
		}
	}
	/* Blank extent used to overwrite the now-duplicated tail slots. */
	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
	elen = 0;

	if (nbh != obh)
	{
		/* The final chained AED block only held the deleted slot's
		 * leftovers: free it and blank out two descriptors (the
		 * duplicate plus the chain pointer). */
		udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= (adsize * 2);
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}
	else
	{
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= adsize;
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - adsize);
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}

	udf_release_data(nbh);
	udf_release_data(obh);
	return (elen >> 30);
}
1952 int8_t inode_bmap(struct inode *inode, int block, kernel_lb_addr *bloc, uint32_t *extoffset,
1953 kernel_lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
1955 uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
1956 int8_t etype;
1958 if (block < 0)
1960 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1961 return -1;
1963 if (!inode)
1965 printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
1966 return -1;
1969 *extoffset = 0;
1970 *elen = 0;
1971 *bloc = UDF_I_LOCATION(inode);
1975 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
1977 *offset = bcount - lbcount;
1978 UDF_I_LENEXTENTS(inode) = lbcount;
1979 return -1;
1981 lbcount += *elen;
1982 } while (lbcount <= bcount);
1984 *offset = bcount + *elen - lbcount;
1986 return etype;
1989 long udf_block_map(struct inode *inode, long block)
1991 kernel_lb_addr eloc, bloc;
1992 uint32_t offset, extoffset, elen;
1993 struct buffer_head *bh = NULL;
1994 int ret;
1996 lock_kernel();
1998 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
1999 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
2000 else
2001 ret = 0;
2003 unlock_kernel();
2004 udf_release_data(bh);
2006 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2007 return udf_fixed_to_variable(ret);
2008 else
2009 return ret;