udf: use sector_t and loff_t for file offsets
[linux-2.6/cjktty.git] / fs / udf / inode.c
blob6b094250d805b26888b8825a48b8ee7ddfc0df68
1 /*
2 * inode.c
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
7 * COPYRIGHT
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
17 * HISTORY
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
23 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
24 * block boundaries (which is not actually allowed)
25 * 12/20/98 added support for strategy 4096
26 * 03/07/99 rewrote udf_block_map (again)
27 * New funcs, inode_bmap, udf_next_aext
28 * 04/19/99 Support for writing device EA's for major/minor #
31 #include "udfdecl.h"
32 #include <linux/mm.h>
33 #include <linux/smp_lock.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
40 #include "udf_i.h"
41 #include "udf_sb.h"
43 MODULE_AUTHOR("Ben Fennema");
44 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
45 MODULE_LICENSE("GPL");
47 #define EXTENT_MERGE_SIZE 5
49 static mode_t udf_convert_permissions(struct fileEntry *);
50 static int udf_update_inode(struct inode *, int);
51 static void udf_fill_inode(struct inode *, struct buffer_head *);
52 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
53 long *, int *);
54 static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
55 kernel_lb_addr, uint32_t, struct buffer_head *);
56 static void udf_split_extents(struct inode *, int *, int, int,
57 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
58 static void udf_prealloc_extents(struct inode *, int, int,
59 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
60 static void udf_merge_extents(struct inode *,
61 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
62 static void udf_update_extents(struct inode *,
63 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
64 kernel_lb_addr, uint32_t, struct buffer_head **);
65 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
68 * udf_delete_inode
70 * PURPOSE
71 * Clean-up before the specified inode is destroyed.
73 * DESCRIPTION
74 * This routine is called when the kernel destroys an inode structure
75 * ie. when iput() finds i_count == 0.
77 * HISTORY
78 * July 1, 1997 - Andrew E. Mileski
79 * Written, tested, and released.
81 * Called at the last iput() if i_nlink is zero.
83 void udf_delete_inode(struct inode * inode)
85 truncate_inode_pages(&inode->i_data, 0);
87 if (is_bad_inode(inode))
88 goto no_delete;
90 inode->i_size = 0;
91 udf_truncate(inode);
92 lock_kernel();
94 udf_update_inode(inode, IS_SYNC(inode));
95 udf_free_inode(inode);
97 unlock_kernel();
98 return;
99 no_delete:
100 clear_inode(inode);
103 void udf_clear_inode(struct inode *inode)
105 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
106 lock_kernel();
107 udf_discard_prealloc(inode);
108 unlock_kernel();
111 kfree(UDF_I_DATA(inode));
112 UDF_I_DATA(inode) = NULL;
115 static int udf_writepage(struct page *page, struct writeback_control *wbc)
117 return block_write_full_page(page, udf_get_block, wbc);
120 static int udf_readpage(struct file *file, struct page *page)
122 return block_read_full_page(page, udf_get_block);
125 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
127 return block_prepare_write(page, from, to, udf_get_block);
130 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
132 return generic_block_bmap(mapping,block,udf_get_block);
135 const struct address_space_operations udf_aops = {
136 .readpage = udf_readpage,
137 .writepage = udf_writepage,
138 .sync_page = block_sync_page,
139 .prepare_write = udf_prepare_write,
140 .commit_write = generic_commit_write,
141 .bmap = udf_bmap,
144 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
146 struct page *page;
147 char *kaddr;
148 struct writeback_control udf_wbc = {
149 .sync_mode = WB_SYNC_NONE,
150 .nr_to_write = 1,
153 /* from now on we have normal address_space methods */
154 inode->i_data.a_ops = &udf_aops;
156 if (!UDF_I_LENALLOC(inode))
158 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
159 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
160 else
161 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
162 mark_inode_dirty(inode);
163 return;
166 page = grab_cache_page(inode->i_mapping, 0);
167 BUG_ON(!PageLocked(page));
169 if (!PageUptodate(page))
171 kaddr = kmap(page);
172 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
173 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
174 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
175 UDF_I_LENALLOC(inode));
176 flush_dcache_page(page);
177 SetPageUptodate(page);
178 kunmap(page);
180 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
181 UDF_I_LENALLOC(inode));
182 UDF_I_LENALLOC(inode) = 0;
183 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
184 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
185 else
186 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
188 inode->i_data.a_ops->writepage(page, &udf_wbc);
189 page_cache_release(page);
191 mark_inode_dirty(inode);
194 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
196 int newblock;
197 struct buffer_head *sbh = NULL, *dbh = NULL;
198 kernel_lb_addr bloc, eloc;
199 uint32_t elen, extoffset;
200 uint8_t alloctype;
202 struct udf_fileident_bh sfibh, dfibh;
203 loff_t f_pos = udf_ext0_offset(inode) >> 2;
204 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
205 struct fileIdentDesc cfi, *sfi, *dfi;
207 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
208 alloctype = ICBTAG_FLAG_AD_SHORT;
209 else
210 alloctype = ICBTAG_FLAG_AD_LONG;
212 if (!inode->i_size)
214 UDF_I_ALLOCTYPE(inode) = alloctype;
215 mark_inode_dirty(inode);
216 return NULL;
219 /* alloc block, and copy data to it */
220 *block = udf_new_block(inode->i_sb, inode,
221 UDF_I_LOCATION(inode).partitionReferenceNum,
222 UDF_I_LOCATION(inode).logicalBlockNum, err);
224 if (!(*block))
225 return NULL;
226 newblock = udf_get_pblock(inode->i_sb, *block,
227 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
228 if (!newblock)
229 return NULL;
230 dbh = udf_tgetblk(inode->i_sb, newblock);
231 if (!dbh)
232 return NULL;
233 lock_buffer(dbh);
234 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
235 set_buffer_uptodate(dbh);
236 unlock_buffer(dbh);
237 mark_buffer_dirty_inode(dbh, inode);
239 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
240 sbh = sfibh.sbh = sfibh.ebh = NULL;
241 dfibh.soffset = dfibh.eoffset = 0;
242 dfibh.sbh = dfibh.ebh = dbh;
243 while ( (f_pos < size) )
245 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
246 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
247 if (!sfi)
249 udf_release_data(dbh);
250 return NULL;
252 UDF_I_ALLOCTYPE(inode) = alloctype;
253 sfi->descTag.tagLocation = cpu_to_le32(*block);
254 dfibh.soffset = dfibh.eoffset;
255 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
256 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
257 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
258 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
260 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
261 udf_release_data(dbh);
262 return NULL;
265 mark_buffer_dirty_inode(dbh, inode);
267 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
268 UDF_I_LENALLOC(inode) = 0;
269 bloc = UDF_I_LOCATION(inode);
270 eloc.logicalBlockNum = *block;
271 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
272 elen = inode->i_size;
273 UDF_I_LENEXTENTS(inode) = elen;
274 extoffset = udf_file_entry_alloc_offset(inode);
275 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
276 /* UniqueID stuff */
278 udf_release_data(sbh);
279 mark_inode_dirty(inode);
280 return dbh;
283 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
285 int err, new;
286 struct buffer_head *bh;
287 unsigned long phys;
289 if (!create)
291 phys = udf_block_map(inode, block);
292 if (phys)
293 map_bh(bh_result, inode->i_sb, phys);
294 return 0;
297 err = -EIO;
298 new = 0;
299 bh = NULL;
301 lock_kernel();
303 if (block < 0)
304 goto abort_negative;
306 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
308 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
309 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
312 err = 0;
314 bh = inode_getblk(inode, block, &err, &phys, &new);
315 BUG_ON(bh);
316 if (err)
317 goto abort;
318 BUG_ON(!phys);
320 if (new)
321 set_buffer_new(bh_result);
322 map_bh(bh_result, inode->i_sb, phys);
323 abort:
324 unlock_kernel();
325 return err;
327 abort_negative:
328 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
329 goto abort;
332 static struct buffer_head *
333 udf_getblk(struct inode *inode, long block, int create, int *err)
335 struct buffer_head dummy;
337 dummy.b_state = 0;
338 dummy.b_blocknr = -1000;
339 *err = udf_get_block(inode, block, &dummy, create);
340 if (!*err && buffer_mapped(&dummy))
342 struct buffer_head *bh;
343 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
344 if (buffer_new(&dummy))
346 lock_buffer(bh);
347 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
348 set_buffer_uptodate(bh);
349 unlock_buffer(bh);
350 mark_buffer_dirty_inode(bh, inode);
352 return bh;
354 return NULL;
357 static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
358 int *err, long *phys, int *new)
360 struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
361 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
362 uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
363 int count = 0, startnum = 0, endnum = 0;
364 uint32_t elen = 0;
365 kernel_lb_addr eloc, pbloc, cbloc, nbloc;
366 int c = 1;
367 loff_t lbcount = 0, b_off = 0;
368 uint32_t newblocknum, newblock;
369 sector_t offset = 0;
370 int8_t etype;
371 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
372 char lastblock = 0;
374 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
375 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
376 pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
378 /* find the extent which contains the block we are looking for.
379 alternate between laarr[0] and laarr[1] for locations of the
380 current extent, and the previous extent */
383 if (pbh != cbh)
385 udf_release_data(pbh);
386 atomic_inc(&cbh->b_count);
387 pbh = cbh;
389 if (cbh != nbh)
391 udf_release_data(cbh);
392 atomic_inc(&nbh->b_count);
393 cbh = nbh;
396 lbcount += elen;
398 pbloc = cbloc;
399 cbloc = nbloc;
401 pextoffset = cextoffset;
402 cextoffset = nextoffset;
404 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
405 break;
407 c = !c;
409 laarr[c].extLength = (etype << 30) | elen;
410 laarr[c].extLocation = eloc;
412 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
413 pgoal = eloc.logicalBlockNum +
414 ((elen + inode->i_sb->s_blocksize - 1) >>
415 inode->i_sb->s_blocksize_bits);
417 count ++;
418 } while (lbcount + elen <= b_off);
420 b_off -= lbcount;
421 offset = b_off >> inode->i_sb->s_blocksize_bits;
423 /* if the extent is allocated and recorded, return the block
424 if the extent is not a multiple of the blocksize, round up */
426 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
428 if (elen & (inode->i_sb->s_blocksize - 1))
430 elen = EXT_RECORDED_ALLOCATED |
431 ((elen + inode->i_sb->s_blocksize - 1) &
432 ~(inode->i_sb->s_blocksize - 1));
433 etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
435 udf_release_data(pbh);
436 udf_release_data(cbh);
437 udf_release_data(nbh);
438 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
439 *phys = newblock;
440 return NULL;
443 if (etype == -1)
445 endnum = startnum = ((count > 1) ? 1 : count);
446 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
448 laarr[c].extLength =
449 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
450 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
451 inode->i_sb->s_blocksize - 1) &
452 ~(inode->i_sb->s_blocksize - 1));
453 UDF_I_LENEXTENTS(inode) =
454 (UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
455 ~(inode->i_sb->s_blocksize - 1);
457 c = !c;
458 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
459 ((offset + 1) << inode->i_sb->s_blocksize_bits);
460 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
461 count ++;
462 endnum ++;
463 lastblock = 1;
465 else
466 endnum = startnum = ((count > 2) ? 2 : count);
468 /* if the current extent is in position 0, swap it with the previous */
469 if (!c && count != 1)
471 laarr[2] = laarr[0];
472 laarr[0] = laarr[1];
473 laarr[1] = laarr[2];
474 c = 1;
477 /* if the current block is located in a extent, read the next extent */
478 if (etype != -1)
480 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
482 laarr[c+1].extLength = (etype << 30) | elen;
483 laarr[c+1].extLocation = eloc;
484 count ++;
485 startnum ++;
486 endnum ++;
488 else
489 lastblock = 1;
491 udf_release_data(cbh);
492 udf_release_data(nbh);
494 /* if the current extent is not recorded but allocated, get the
495 block in the extent corresponding to the requested block */
496 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
497 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
498 else /* otherwise, allocate a new block */
500 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
501 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
503 if (!goal)
505 if (!(goal = pgoal))
506 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
509 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
510 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
512 udf_release_data(pbh);
513 *err = -ENOSPC;
514 return NULL;
516 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
519 /* if the extent the requsted block is located in contains multiple blocks,
520 split the extent into at most three extents. blocks prior to requested
521 block, requested block, and blocks after requested block */
522 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
524 #ifdef UDF_PREALLOCATE
525 /* preallocate blocks */
526 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
527 #endif
529 /* merge any continuous blocks in laarr */
530 udf_merge_extents(inode, laarr, &endnum);
532 /* write back the new extents, inserting new extents if the new number
533 of extents is greater than the old number, and deleting extents if
534 the new number of extents is less than the old number */
535 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
537 udf_release_data(pbh);
539 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
540 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
542 return NULL;
544 *phys = newblock;
545 *err = 0;
546 *new = 1;
547 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
548 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
549 inode->i_ctime = current_fs_time(inode->i_sb);
551 if (IS_SYNC(inode))
552 udf_sync_inode(inode);
553 else
554 mark_inode_dirty(inode);
555 return result;
558 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
559 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
561 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
562 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
564 int curr = *c;
565 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
566 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
567 int8_t etype = (laarr[curr].extLength >> 30);
569 if (blen == 1)
571 else if (!offset || blen == offset + 1)
573 laarr[curr+2] = laarr[curr+1];
574 laarr[curr+1] = laarr[curr];
576 else
578 laarr[curr+3] = laarr[curr+1];
579 laarr[curr+2] = laarr[curr+1] = laarr[curr];
582 if (offset)
584 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
586 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
587 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
588 (offset << inode->i_sb->s_blocksize_bits);
589 laarr[curr].extLocation.logicalBlockNum = 0;
590 laarr[curr].extLocation.partitionReferenceNum = 0;
592 else
593 laarr[curr].extLength = (etype << 30) |
594 (offset << inode->i_sb->s_blocksize_bits);
595 curr ++;
596 (*c) ++;
597 (*endnum) ++;
600 laarr[curr].extLocation.logicalBlockNum = newblocknum;
601 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
602 laarr[curr].extLocation.partitionReferenceNum =
603 UDF_I_LOCATION(inode).partitionReferenceNum;
604 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
605 inode->i_sb->s_blocksize;
606 curr ++;
608 if (blen != offset + 1)
610 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
611 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
612 laarr[curr].extLength = (etype << 30) |
613 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
614 curr ++;
615 (*endnum) ++;
620 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
621 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
623 int start, length = 0, currlength = 0, i;
625 if (*endnum >= (c+1))
627 if (!lastblock)
628 return;
629 else
630 start = c;
632 else
634 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
636 start = c+1;
637 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
638 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
640 else
641 start = c;
644 for (i=start+1; i<=*endnum; i++)
646 if (i == *endnum)
648 if (lastblock)
649 length += UDF_DEFAULT_PREALLOC_BLOCKS;
651 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
652 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
653 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
654 else
655 break;
658 if (length)
660 int next = laarr[start].extLocation.logicalBlockNum +
661 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
662 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
663 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
664 laarr[start].extLocation.partitionReferenceNum,
665 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
666 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
668 if (numalloc)
670 if (start == (c+1))
671 laarr[start].extLength +=
672 (numalloc << inode->i_sb->s_blocksize_bits);
673 else
675 memmove(&laarr[c+2], &laarr[c+1],
676 sizeof(long_ad) * (*endnum - (c+1)));
677 (*endnum) ++;
678 laarr[c+1].extLocation.logicalBlockNum = next;
679 laarr[c+1].extLocation.partitionReferenceNum =
680 laarr[c].extLocation.partitionReferenceNum;
681 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
682 (numalloc << inode->i_sb->s_blocksize_bits);
683 start = c+1;
686 for (i=start+1; numalloc && i<*endnum; i++)
688 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
689 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
691 if (elen > numalloc)
693 laarr[i].extLength -=
694 (numalloc << inode->i_sb->s_blocksize_bits);
695 numalloc = 0;
697 else
699 numalloc -= elen;
700 if (*endnum > (i+1))
701 memmove(&laarr[i], &laarr[i+1],
702 sizeof(long_ad) * (*endnum - (i+1)));
703 i --;
704 (*endnum) --;
707 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
712 static void udf_merge_extents(struct inode *inode,
713 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
715 int i;
717 for (i=0; i<(*endnum-1); i++)
719 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
721 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
722 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
723 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
724 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
726 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
727 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
728 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
730 laarr[i+1].extLength = (laarr[i+1].extLength -
731 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
732 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
733 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
734 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
735 laarr[i+1].extLocation.logicalBlockNum =
736 laarr[i].extLocation.logicalBlockNum +
737 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
738 inode->i_sb->s_blocksize_bits);
740 else
742 laarr[i].extLength = laarr[i+1].extLength +
743 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
744 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
745 if (*endnum > (i+2))
746 memmove(&laarr[i+1], &laarr[i+2],
747 sizeof(long_ad) * (*endnum - (i+2)));
748 i --;
749 (*endnum) --;
753 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
754 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
756 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
757 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
758 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
759 laarr[i].extLocation.logicalBlockNum = 0;
760 laarr[i].extLocation.partitionReferenceNum = 0;
762 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
763 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
764 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
766 laarr[i+1].extLength = (laarr[i+1].extLength -
767 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
768 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
769 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
770 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
772 else
774 laarr[i].extLength = laarr[i+1].extLength +
775 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
776 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
777 if (*endnum > (i+2))
778 memmove(&laarr[i+1], &laarr[i+2],
779 sizeof(long_ad) * (*endnum - (i+2)));
780 i --;
781 (*endnum) --;
784 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
786 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
787 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
788 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
789 laarr[i].extLocation.logicalBlockNum = 0;
790 laarr[i].extLocation.partitionReferenceNum = 0;
791 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
792 EXT_NOT_RECORDED_NOT_ALLOCATED;
797 static void udf_update_extents(struct inode *inode,
798 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
799 kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
801 int start = 0, i;
802 kernel_lb_addr tmploc;
803 uint32_t tmplen;
805 if (startnum > endnum)
807 for (i=0; i<(startnum-endnum); i++)
809 udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
810 laarr[i].extLength, *pbh);
813 else if (startnum < endnum)
815 for (i=0; i<(endnum-startnum); i++)
817 udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
818 laarr[i].extLength, *pbh);
819 udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
820 &laarr[i].extLength, pbh, 1);
821 start ++;
825 for (i=start; i<endnum; i++)
827 udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
828 udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
829 laarr[i].extLength, *pbh, 1);
833 struct buffer_head * udf_bread(struct inode * inode, int block,
834 int create, int * err)
836 struct buffer_head * bh = NULL;
838 bh = udf_getblk(inode, block, create, err);
839 if (!bh)
840 return NULL;
842 if (buffer_uptodate(bh))
843 return bh;
844 ll_rw_block(READ, 1, &bh);
845 wait_on_buffer(bh);
846 if (buffer_uptodate(bh))
847 return bh;
848 brelse(bh);
849 *err = -EIO;
850 return NULL;
853 void udf_truncate(struct inode * inode)
855 int offset;
856 int err;
858 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
859 S_ISLNK(inode->i_mode)))
860 return;
861 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
862 return;
864 lock_kernel();
865 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
867 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
868 inode->i_size))
870 udf_expand_file_adinicb(inode, inode->i_size, &err);
871 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
873 inode->i_size = UDF_I_LENALLOC(inode);
874 unlock_kernel();
875 return;
877 else
878 udf_truncate_extents(inode);
880 else
882 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
883 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
884 UDF_I_LENALLOC(inode) = inode->i_size;
887 else
889 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
890 udf_truncate_extents(inode);
893 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
894 if (IS_SYNC(inode))
895 udf_sync_inode (inode);
896 else
897 mark_inode_dirty(inode);
898 unlock_kernel();
901 static void
902 __udf_read_inode(struct inode *inode)
904 struct buffer_head *bh = NULL;
905 struct fileEntry *fe;
906 uint16_t ident;
909 * Set defaults, but the inode is still incomplete!
910 * Note: get_new_inode() sets the following on a new inode:
911 * i_sb = sb
912 * i_no = ino
913 * i_flags = sb->s_flags
914 * i_state = 0
915 * clean_inode(): zero fills and sets
916 * i_count = 1
917 * i_nlink = 1
918 * i_op = NULL;
920 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
922 if (!bh)
924 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
925 inode->i_ino);
926 make_bad_inode(inode);
927 return;
930 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
931 ident != TAG_IDENT_USE)
933 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
934 inode->i_ino, ident);
935 udf_release_data(bh);
936 make_bad_inode(inode);
937 return;
940 fe = (struct fileEntry *)bh->b_data;
942 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
944 struct buffer_head *ibh = NULL, *nbh = NULL;
945 struct indirectEntry *ie;
947 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
948 if (ident == TAG_IDENT_IE)
950 if (ibh)
952 kernel_lb_addr loc;
953 ie = (struct indirectEntry *)ibh->b_data;
955 loc = lelb_to_cpu(ie->indirectICB.extLocation);
957 if (ie->indirectICB.extLength &&
958 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
960 if (ident == TAG_IDENT_FE ||
961 ident == TAG_IDENT_EFE)
963 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
964 udf_release_data(bh);
965 udf_release_data(ibh);
966 udf_release_data(nbh);
967 __udf_read_inode(inode);
968 return;
970 else
972 udf_release_data(nbh);
973 udf_release_data(ibh);
976 else
977 udf_release_data(ibh);
980 else
981 udf_release_data(ibh);
983 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
985 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
986 le16_to_cpu(fe->icbTag.strategyType));
987 udf_release_data(bh);
988 make_bad_inode(inode);
989 return;
991 udf_fill_inode(inode, bh);
992 udf_release_data(bh);
995 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
997 struct fileEntry *fe;
998 struct extendedFileEntry *efe;
999 time_t convtime;
1000 long convtime_usec;
1001 int offset;
1003 fe = (struct fileEntry *)bh->b_data;
1004 efe = (struct extendedFileEntry *)bh->b_data;
1006 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1007 UDF_I_STRAT4096(inode) = 0;
1008 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1009 UDF_I_STRAT4096(inode) = 1;
1011 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1012 UDF_I_UNIQUE(inode) = 0;
1013 UDF_I_LENEATTR(inode) = 0;
1014 UDF_I_LENEXTENTS(inode) = 0;
1015 UDF_I_LENALLOC(inode) = 0;
1016 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1017 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1018 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1020 UDF_I_EFE(inode) = 1;
1021 UDF_I_USE(inode) = 0;
1022 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1023 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1025 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1027 UDF_I_EFE(inode) = 0;
1028 UDF_I_USE(inode) = 0;
1029 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1030 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1032 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1034 UDF_I_EFE(inode) = 0;
1035 UDF_I_USE(inode) = 1;
1036 UDF_I_LENALLOC(inode) =
1037 le32_to_cpu(
1038 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1039 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1040 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1041 return;
1044 inode->i_uid = le32_to_cpu(fe->uid);
1045 if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1046 UDF_FLAG_UID_IGNORE))
1047 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1049 inode->i_gid = le32_to_cpu(fe->gid);
1050 if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1051 UDF_FLAG_GID_IGNORE))
1052 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1054 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1055 if (!inode->i_nlink)
1056 inode->i_nlink = 1;
1058 inode->i_size = le64_to_cpu(fe->informationLength);
1059 UDF_I_LENEXTENTS(inode) = inode->i_size;
1061 inode->i_mode = udf_convert_permissions(fe);
1062 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1064 if (UDF_I_EFE(inode) == 0)
1066 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1067 (inode->i_sb->s_blocksize_bits - 9);
1069 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1070 lets_to_cpu(fe->accessTime)) )
1072 inode->i_atime.tv_sec = convtime;
1073 inode->i_atime.tv_nsec = convtime_usec * 1000;
1075 else
1077 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1080 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1081 lets_to_cpu(fe->modificationTime)) )
1083 inode->i_mtime.tv_sec = convtime;
1084 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1086 else
1088 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1091 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1092 lets_to_cpu(fe->attrTime)) )
1094 inode->i_ctime.tv_sec = convtime;
1095 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1097 else
1099 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1102 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1103 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1104 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1105 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1107 else
1109 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1110 (inode->i_sb->s_blocksize_bits - 9);
1112 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1113 lets_to_cpu(efe->accessTime)) )
1115 inode->i_atime.tv_sec = convtime;
1116 inode->i_atime.tv_nsec = convtime_usec * 1000;
1118 else
1120 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1123 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1124 lets_to_cpu(efe->modificationTime)) )
1126 inode->i_mtime.tv_sec = convtime;
1127 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1129 else
1131 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1134 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1135 lets_to_cpu(efe->createTime)) )
1137 UDF_I_CRTIME(inode).tv_sec = convtime;
1138 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1140 else
1142 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1145 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1146 lets_to_cpu(efe->attrTime)) )
1148 inode->i_ctime.tv_sec = convtime;
1149 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1151 else
1153 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1156 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1157 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1158 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1159 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1162 switch (fe->icbTag.fileType)
1164 case ICBTAG_FILE_TYPE_DIRECTORY:
1166 inode->i_op = &udf_dir_inode_operations;
1167 inode->i_fop = &udf_dir_operations;
1168 inode->i_mode |= S_IFDIR;
1169 inc_nlink(inode);
1170 break;
1172 case ICBTAG_FILE_TYPE_REALTIME:
1173 case ICBTAG_FILE_TYPE_REGULAR:
1174 case ICBTAG_FILE_TYPE_UNDEF:
1176 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1177 inode->i_data.a_ops = &udf_adinicb_aops;
1178 else
1179 inode->i_data.a_ops = &udf_aops;
1180 inode->i_op = &udf_file_inode_operations;
1181 inode->i_fop = &udf_file_operations;
1182 inode->i_mode |= S_IFREG;
1183 break;
1185 case ICBTAG_FILE_TYPE_BLOCK:
1187 inode->i_mode |= S_IFBLK;
1188 break;
1190 case ICBTAG_FILE_TYPE_CHAR:
1192 inode->i_mode |= S_IFCHR;
1193 break;
1195 case ICBTAG_FILE_TYPE_FIFO:
1197 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1198 break;
1200 case ICBTAG_FILE_TYPE_SOCKET:
1202 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1203 break;
1205 case ICBTAG_FILE_TYPE_SYMLINK:
1207 inode->i_data.a_ops = &udf_symlink_aops;
1208 inode->i_op = &page_symlink_inode_operations;
1209 inode->i_mode = S_IFLNK|S_IRWXUGO;
1210 break;
1212 default:
1214 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1215 inode->i_ino, fe->icbTag.fileType);
1216 make_bad_inode(inode);
1217 return;
1220 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1222 struct deviceSpec *dsea =
1223 (struct deviceSpec *)
1224 udf_get_extendedattr(inode, 12, 1);
1226 if (dsea)
1228 init_special_inode(inode, inode->i_mode, MKDEV(
1229 le32_to_cpu(dsea->majorDeviceIdent),
1230 le32_to_cpu(dsea->minorDeviceIdent)));
1231 /* Developer ID ??? */
1233 else
1235 make_bad_inode(inode);
1240 static mode_t
1241 udf_convert_permissions(struct fileEntry *fe)
1243 mode_t mode;
1244 uint32_t permissions;
1245 uint32_t flags;
1247 permissions = le32_to_cpu(fe->permissions);
1248 flags = le16_to_cpu(fe->icbTag.flags);
1250 mode = (( permissions ) & S_IRWXO) |
1251 (( permissions >> 2 ) & S_IRWXG) |
1252 (( permissions >> 4 ) & S_IRWXU) |
1253 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1254 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1255 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1257 return mode;
/*
 * udf_write_inode
 *
 * PURPOSE
 *	Write out the specified inode.
 *
 * DESCRIPTION
 *	This routine is called whenever an inode is synced; it serializes
 *	the in-core inode back to its on-disc (extended) file entry via
 *	udf_update_inode(), under the big kernel lock.
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 */
int udf_write_inode(struct inode * inode, int sync)
{
	int ret;

	/* BKL serializes access to the shared superblock/inode state */
	lock_kernel();
	ret = udf_update_inode(inode, sync);
	unlock_kernel();

	return ret;
}
/* Synchronously write the inode's on-disc entry (do_sync=1 waits for I/O). */
int udf_sync_inode(struct inode * inode)
{
	return udf_update_inode(inode, 1);
}
/*
 * Serialize the in-core inode into its on-disc descriptor (fileEntry,
 * extendedFileEntry, or unallocSpaceEntry), recompute the descriptor
 * CRC and tag checksum, and mark the buffer dirty.  With do_sync set,
 * the buffer is written out synchronously and I/O errors are reported.
 * Returns 0 on success or -EIO on read/write failure.
 */
static int
udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int i;
	kernel_timestamp cpu_time;
	int err = 0;

	/* Read the block that holds this inode's (extended) file entry */
	bh = udf_tread(inode->i_sb,
		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));

	if (!bh)
	{
		udf_debug("bread failure\n");
		return -EIO;
	}

	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);

	/* fe and efe alias the same buffer; which is valid depends on UDF_I_EFE */
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	/*
	 * NOTE(review): the memset above has just zeroed the buffer, so
	 * fe->descTag.tagIdent is always 0 here and this branch appears
	 * unreachable — presumably UDF_I_USE(inode) was intended instead
	 * of reading the (now cleared) on-disc tag; confirm against the
	 * udf_fill_inode() counterpart before relying on USE writeback.
	 */
	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
	{
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
			sizeof(tag);
		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
		use->descTag.descCRCLength = cpu_to_le16(crclen);
		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));

		/* Tag checksum covers all 16 tag bytes except the checksum byte (4) */
		use->descTag.tagChecksum = 0;
		for (i=0; i<16; i++)
			if (i != 4)
				use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];

		mark_buffer_dirty(bh);
		udf_release_data(bh);
		return err;
	}

	/* -1 means "owner/group not recorded" per mount options */
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(-1);
	else fe->uid = cpu_to_le32(inode->i_uid);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(-1);
	else fe->gid = cpu_to_le32(inode->i_gid);

	/* Inverse of udf_convert_permissions(): spread rwx into 5-bit groups */
	udfperms =	((inode->i_mode & S_IRWXO)     ) |
			((inode->i_mode & S_IRWXG) << 2) |
			((inode->i_mode & S_IRWXU) << 4);

	/* Preserve the UDF-only delete/chattr permission bits already on disc */
	udfperms |=	(le32_to_cpu(fe->permissions) &
			(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
			 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
			 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
	fe->permissions = cpu_to_le32(udfperms);

	/* Directories: the on-disc link count excludes the "." self-link */
	if (S_ISDIR(inode->i_mode))
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	/* Device nodes keep major/minor in a deviceSpec extended attribute */
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
	{
		regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)
				udf_get_extendedattr(inode, 12, 1);

		if (!dsea)
		{
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
					sizeof(struct deviceSpec) +
					sizeof(regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
				sizeof(regid));
			dsea->impUseLength = cpu_to_le32(sizeof(regid));
		}
		eid = (regid *)dsea->impUse;
		memset(eid, 0, sizeof(regid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (UDF_I_EFE(inode) == 0)
	{
		/* Plain fileEntry: copy cached alloc descs / EAs after the header */
		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		/* i_blocks is in 512-byte sectors; round up to fs blocks */
		fe->logicalBlocksRecorded = cpu_to_le64(
			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
			(inode->i_sb->s_blocksize_bits - 9));

		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
			fe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
			fe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
			fe->attrTime = cpu_to_lets(cpu_time);
		memset(&(fe->impIdent), 0, sizeof(regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	}
	else
	{
		/* Extended fileEntry: larger header, extra objectSize/createTime */
		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
		efe->objectSize = cpu_to_le64(inode->i_size);
		efe->logicalBlocksRecorded = cpu_to_le64(
			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
			(inode->i_sb->s_blocksize_bits - 9));

		/* Creation time must not be later than any other timestamp */
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_atime;
		}
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_mtime;
		}
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_ctime;
		}

		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
			efe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
			efe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
			efe->createTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
			efe->attrTime = cpu_to_lets(cpu_time);

		memset(&(efe->impIdent), 0, sizeof(regid));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}
	if (UDF_I_STRAT4096(inode))
	{
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	}
	else
	{
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	/* Map the POSIX file type onto the ICB tag file type */
	if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	/* Merge alloc type + setuid/setgid/sticky with the remaining flags */
	icbflags =	UDF_I_ALLOCTYPE(inode) |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	/* Descriptor version 3 for UDF >= 2.00 media, 2 otherwise */
	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));

	/* Tag checksum covers all 16 tag bytes except the checksum byte (4) */
	fe->descTag.tagChecksum = 0;
	for (i=0; i<16; i++)
		if (i != 4)
			fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync)
	{
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh))
		{
			printk("IO error syncing udf inode [%s:%08lx]\n",
				inode->i_sb->s_id, inode->i_ino);
			err = -EIO;
		}
	}
	udf_release_data(bh);
	return err;
}
/*
 * Look up (or read in) the inode at the given logical-block address.
 * Returns NULL on allocation failure, when the on-disc entry is bad,
 * or when the address lies outside its partition.
 *
 * NOTE(review): the partition-length range check runs only after
 * __udf_read_inode() has already read the block — presumably it would
 * be safer to validate the address before touching the media; confirm
 * whether reordering is acceptable here.
 */
struct inode *
udf_iget(struct super_block *sb, kernel_lb_addr ino)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);

	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		/* Freshly allocated in-core inode: fill it from disc */
		memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
		__udf_read_inode(inode);
		unlock_new_inode(inode);
	}

	if (is_bad_inode(inode))
		goto out_iput;

	if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
		udf_debug("block=%d, partition=%d out of range\n",
			ino.logicalBlockNum, ino.partitionReferenceNum);
		make_bad_inode(inode);
		goto out_iput;
	}

	return inode;

 out_iput:
	iput(inode);
	return NULL;
}
/*
 * Append an extent (eloc/elen) at *extoffset in the current allocation
 * descriptor area.  When the current descriptor block has no room for
 * two more descriptors, a new allocation extent descriptor (AED) block
 * is allocated, a "next extent" pointer is written in the old location,
 * and the cursor (*bloc, *extoffset, *bh) is moved into the new block.
 * Returns the extent type written, or -1 on failure.
 */
int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
{
	int adsize;
	short_ad *sad = NULL;
	long_ad *lad = NULL;
	struct allocExtDesc *aed;
	int8_t etype;
	uint8_t *ptr;

	/* No buffer => descriptors live inline in the cached file entry data */
	if (!*bh)
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
		ptr = (*bh)->b_data + *extoffset;

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return -1;

	/* Need room for the new descriptor plus a possible chain pointer */
	if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
	{
		char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		kernel_lb_addr obloc = *bloc;

		if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
		{
			return -1;
		}
		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
			*bloc, 0))))
		{
			return -1;
		}
		lock_buffer(nbh);
		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(nbh);
		unlock_buffer(nbh);
		mark_buffer_dirty_inode(nbh, inode);

		aed = (struct allocExtDesc *)(nbh->b_data);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
		if (*extoffset + adsize > inode->i_sb->s_blocksize)
		{
			/*
			 * Only one slot left in the old area: move its last
			 * descriptor into the new AED and chain from its slot.
			 */
			loffset = *extoffset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = ptr - adsize;
			dptr = nbh->b_data + sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			*extoffset = sizeof(struct allocExtDesc) + adsize;
		}
		else
		{
			/* Chain pointer fits in the old area; account for it there */
			loffset = *extoffset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = ptr;
			*extoffset = sizeof(struct allocExtDesc);

			if (*bh)
			{
				aed = (struct allocExtDesc *)(*bh)->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
			}
			else
			{
				UDF_I_LENALLOC(inode) += adsize;
				mark_inode_dirty(inode);
			}
		}
		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
				bloc->logicalBlockNum, sizeof(tag));
		else
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
				bloc->logicalBlockNum, sizeof(tag));
		/* Write the "next extent" descriptor pointing at the new AED */
		switch (UDF_I_ALLOCTYPE(inode))
		{
			case ICBTAG_FLAG_AD_SHORT:
			{
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
				break;
			}
			case ICBTAG_FLAG_AD_LONG:
			{
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(*bloc);
				memset(lad->impUse, 0x00, sizeof(lad->impUse));
				break;
			}
		}
		if (*bh)
		{
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((*bh)->b_data, loffset);
			else
				udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(*bh, inode);
			udf_release_data(*bh);
		}
		else
			mark_inode_dirty(inode);
		/* Cursor now points into the freshly chained AED block */
		*bh = nbh;
	}

	etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);

	/* Account for the descriptor we just appended */
	if (!*bh)
	{
		UDF_I_LENALLOC(inode) += adsize;
		mark_inode_dirty(inode);
	}
	else
	{
		aed = (struct allocExtDesc *)(*bh)->b_data;
		aed->lengthAllocDescs =
			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
			udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
		else
			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(*bh, inode);
	}

	return etype;
}
/*
 * Write one allocation descriptor (eloc/elen) at *extoffset, either
 * inline in the cached file entry data (bh == NULL) or into the given
 * AED buffer.  Advances *extoffset when inc is set.  Returns the
 * extent type (top two bits of elen), or -1 for an unknown alloc type.
 */
int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
    kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
{
	int adsize;
	uint8_t *ptr;

	if (!bh)
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
	{
		ptr = bh->b_data + *extoffset;
		/* Balance the udf_release_data() below */
		atomic_inc(&bh->b_count);
	}

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad = (short_ad *)ptr;
			sad->extLength = cpu_to_le32(elen);
			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
			adsize = sizeof(short_ad);
			break;
		}
		case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad = (long_ad *)ptr;
			lad->extLength = cpu_to_le32(elen);
			lad->extLocation = cpu_to_lelb(eloc);
			memset(lad->impUse, 0x00, sizeof(lad->impUse));
			adsize = sizeof(long_ad);
			break;
		}
		default:
			return -1;
	}

	if (bh)
	{
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
		{
			struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
			udf_update_tag((bh)->b_data,
				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(bh, inode);
		udf_release_data(bh);
	}
	else
		mark_inode_dirty(inode);

	if (inc)
		*extoffset += adsize;
	return (elen >> 30);
}
/*
 * Fetch the next allocation descriptor, transparently following
 * "next extent" (AED chain) pointers: whenever udf_current_aext()
 * returns a chain descriptor, the cursor is moved into the referenced
 * AED block and the read is retried.  Returns the extent type or -1.
 */
int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
{
	int8_t etype;

	while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
	{
		/* Follow the chain into the next allocation extent block */
		*bloc = *eloc;
		*extoffset = sizeof(struct allocExtDesc);
		udf_release_data(*bh);
		if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
		{
			udf_debug("reading block %d failed!\n",
				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
			return -1;
		}
	}

	return etype;
}
/*
 * Decode the allocation descriptor at *extoffset (inline in the file
 * entry when *bh is NULL, otherwise inside the AED buffer) into
 * eloc/elen/etype.  A zero *extoffset is initialized to the start of
 * the descriptor area.  Returns the extent type, or -1 when the area
 * is exhausted or the allocation type is unsupported.
 */
int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;

	if (!*bh)
	{
		if (!(*extoffset))
			*extoffset = udf_file_entry_alloc_offset(inode);
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
	}
	else
	{
		if (!(*extoffset))
			*extoffset = sizeof(struct allocExtDesc);
		ptr = (*bh)->b_data + *extoffset;
		alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
	}

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad;

			if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(sad->extLength) >> 30;
			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
			/* short_ads carry no partition; inherit the inode's */
			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad;

			if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(lad->extLength) >> 30;
			*eloc = lelb_to_cpu(lad->extLocation);
			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		default:
		{
			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
			return -1;
		}
	}

	return etype;
}
/*
 * Insert an extent (neloc/nelen) at extoffset by rippling every
 * following descriptor one slot forward: each iteration writes the
 * pending extent and picks up the one it displaced, until the end of
 * the list, where udf_add_aext() appends the final displaced extent.
 * Returns the type of the last extent written.
 */
static int8_t
udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
	kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
{
	kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;

	/* Extra ref: udf_write_aext()/udf_add_aext() drop one each pass */
	if (bh)
		atomic_inc(&bh->b_count);

	while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
	{
		udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);

		/* The displaced extent becomes the one to insert next */
		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}
	udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
	udf_release_data(bh);
	return (nelen >> 30);
}
/*
 * Delete the extent at nextoffset by shifting all following
 * descriptors one slot back.  Two cursors walk the descriptor chain:
 * (nbloc/nextoffset/nbh) reads ahead while (obloc/oextoffset/obh)
 * rewrites one slot behind.  The freed trailing slot(s) are zeroed
 * and the descriptor-area length accounting is adjusted; when the
 * walk crossed into a new AED block that block is freed as well.
 */
int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
{
	struct buffer_head *obh;
	kernel_lb_addr obloc;
	int oextoffset, adsize;
	int8_t etype;
	struct allocExtDesc *aed;

	/* Two refs: one for each cursor (obh starts as an alias of nbh) */
	if (nbh)
	{
		atomic_inc(&nbh->b_count);
		atomic_inc(&nbh->b_count);
	}

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		adsize = 0;

	obh = nbh;
	obloc = nbloc;
	oextoffset = nextoffset;

	/* Skip (consume) the extent being deleted */
	if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
		return -1;

	while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
	{
		udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
		if (obh != nbh)
		{
			/* Reader moved to a new AED block; drag the writer along */
			obloc = nbloc;
			udf_release_data(obh);
			atomic_inc(&nbh->b_count);
			obh = nbh;
			oextoffset = nextoffset - adsize;
		}
	}
	/* Terminate the shortened list with a zero extent */
	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
	elen = 0;

	if (nbh != obh)
	{
		/* Last AED block became empty: free it and drop both its slots */
		udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= (adsize * 2);
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}
	else
	{
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= adsize;
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - adsize);
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}

	udf_release_data(nbh);
	udf_release_data(obh);
	return (elen >> 30);
}
1952 int8_t inode_bmap(struct inode *inode, sector_t block, kernel_lb_addr *bloc, uint32_t *extoffset,
1953 kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset, struct buffer_head **bh)
1955 loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
1956 int8_t etype;
1958 if (block < 0)
1960 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1961 return -1;
1964 *extoffset = 0;
1965 *elen = 0;
1966 *bloc = UDF_I_LOCATION(inode);
1970 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
1972 *offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
1973 UDF_I_LENEXTENTS(inode) = lbcount;
1974 return -1;
1976 lbcount += *elen;
1977 } while (lbcount <= bcount);
1979 *offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;
1981 return etype;
1984 long udf_block_map(struct inode *inode, sector_t block)
1986 kernel_lb_addr eloc, bloc;
1987 uint32_t extoffset, elen;
1988 sector_t offset;
1989 struct buffer_head *bh = NULL;
1990 int ret;
1992 lock_kernel();
1994 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
1995 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
1996 else
1997 ret = 0;
1999 unlock_kernel();
2000 udf_release_data(bh);
2002 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2003 return udf_fixed_to_variable(ret);
2004 else
2005 return ret;