initial commit with v2.6.9
[linux-2.6.9-moxart.git] / fs / udf / inode.c
blob68245ca6ee555f6bdbf22c9676bd268372f1b4eb
1 /*
2 * inode.c
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
7 * CONTACTS
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hpesjro.fc.hp.com
12 * COPYRIGHT
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2004 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
22 * HISTORY
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
36 #include "udfdecl.h"
37 #include <linux/mm.h>
38 #include <linux/smp_lock.h>
39 #include <linux/module.h>
40 #include <linux/pagemap.h>
41 #include <linux/buffer_head.h>
42 #include <linux/writeback.h>
43 #include <linux/slab.h>
45 #include "udf_i.h"
46 #include "udf_sb.h"
48 MODULE_AUTHOR("Ben Fennema");
49 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
50 MODULE_LICENSE("GPL");
52 #define EXTENT_MERGE_SIZE 5
54 static mode_t udf_convert_permissions(struct fileEntry *);
55 static int udf_update_inode(struct inode *, int);
56 static void udf_fill_inode(struct inode *, struct buffer_head *);
57 static struct buffer_head *inode_getblk(struct inode *, long, int *,
58 long *, int *);
59 static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
60 kernel_lb_addr, uint32_t, struct buffer_head *);
61 static void udf_split_extents(struct inode *, int *, int, int,
62 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
63 static void udf_prealloc_extents(struct inode *, int, int,
64 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
65 static void udf_merge_extents(struct inode *,
66 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
67 static void udf_update_extents(struct inode *,
68 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
69 kernel_lb_addr, uint32_t, struct buffer_head **);
70 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
73 * udf_put_inode
75 * PURPOSE
77 * DESCRIPTION
78 * This routine is called whenever the kernel no longer needs the inode.
80 * HISTORY
81 * July 1, 1997 - Andrew E. Mileski
82 * Written, tested, and released.
84 * Called at each iput()
86 void udf_put_inode(struct inode * inode)
88 if (!(inode->i_sb->s_flags & MS_RDONLY))
90 lock_kernel();
91 udf_discard_prealloc(inode);
92 unlock_kernel();
97 * udf_delete_inode
99 * PURPOSE
100 * Clean-up before the specified inode is destroyed.
102 * DESCRIPTION
103 * This routine is called when the kernel destroys an inode structure
104 * ie. when iput() finds i_count == 0.
106 * HISTORY
107 * July 1, 1997 - Andrew E. Mileski
108 * Written, tested, and released.
110 * Called at the last iput() if i_nlink is zero.
112 void udf_delete_inode(struct inode * inode)
114 if (is_bad_inode(inode))
115 goto no_delete;
117 inode->i_size = 0;
118 udf_truncate(inode);
119 lock_kernel();
121 udf_update_inode(inode, IS_SYNC(inode));
122 udf_free_inode(inode);
124 unlock_kernel();
125 return;
126 no_delete:
127 clear_inode(inode);
130 void udf_clear_inode(struct inode *inode)
132 kfree(UDF_I_DATA(inode));
133 UDF_I_DATA(inode) = NULL;
136 static int udf_writepage(struct page *page, struct writeback_control *wbc)
138 return block_write_full_page(page, udf_get_block, wbc);
141 static int udf_readpage(struct file *file, struct page *page)
143 return block_read_full_page(page, udf_get_block);
146 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
148 return block_prepare_write(page, from, to, udf_get_block);
151 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
153 return generic_block_bmap(mapping,block,udf_get_block);
156 struct address_space_operations udf_aops = {
157 .readpage = udf_readpage,
158 .writepage = udf_writepage,
159 .sync_page = block_sync_page,
160 .prepare_write = udf_prepare_write,
161 .commit_write = generic_commit_write,
162 .bmap = udf_bmap,
165 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
167 struct page *page;
168 char *kaddr;
169 struct writeback_control udf_wbc = {
170 .sync_mode = WB_SYNC_NONE,
171 .nr_to_write = 1,
174 /* from now on we have normal address_space methods */
175 inode->i_data.a_ops = &udf_aops;
177 if (!UDF_I_LENALLOC(inode))
179 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
180 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
181 else
182 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
183 mark_inode_dirty(inode);
184 return;
187 page = grab_cache_page(inode->i_mapping, 0);
188 if (!PageLocked(page))
189 PAGE_BUG(page);
190 if (!PageUptodate(page))
192 kaddr = kmap(page);
193 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
194 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
195 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
196 UDF_I_LENALLOC(inode));
197 flush_dcache_page(page);
198 SetPageUptodate(page);
199 kunmap(page);
201 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
202 UDF_I_LENALLOC(inode));
203 UDF_I_LENALLOC(inode) = 0;
204 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
205 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
206 else
207 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
209 inode->i_data.a_ops->writepage(page, &udf_wbc);
210 page_cache_release(page);
212 mark_inode_dirty(inode);
215 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
217 int newblock;
218 struct buffer_head *sbh = NULL, *dbh = NULL;
219 kernel_lb_addr bloc, eloc;
220 uint32_t elen, extoffset;
221 uint8_t alloctype;
223 struct udf_fileident_bh sfibh, dfibh;
224 loff_t f_pos = udf_ext0_offset(inode) >> 2;
225 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
226 struct fileIdentDesc cfi, *sfi, *dfi;
228 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
229 alloctype = ICBTAG_FLAG_AD_SHORT;
230 else
231 alloctype = ICBTAG_FLAG_AD_LONG;
233 if (!inode->i_size)
235 UDF_I_ALLOCTYPE(inode) = alloctype;
236 mark_inode_dirty(inode);
237 return NULL;
240 /* alloc block, and copy data to it */
241 *block = udf_new_block(inode->i_sb, inode,
242 UDF_I_LOCATION(inode).partitionReferenceNum,
243 UDF_I_LOCATION(inode).logicalBlockNum, err);
245 if (!(*block))
246 return NULL;
247 newblock = udf_get_pblock(inode->i_sb, *block,
248 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
249 if (!newblock)
250 return NULL;
251 dbh = udf_tgetblk(inode->i_sb, newblock);
252 if (!dbh)
253 return NULL;
254 lock_buffer(dbh);
255 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
256 set_buffer_uptodate(dbh);
257 unlock_buffer(dbh);
258 mark_buffer_dirty_inode(dbh, inode);
260 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
261 sbh = sfibh.sbh = sfibh.ebh = NULL;
262 dfibh.soffset = dfibh.eoffset = 0;
263 dfibh.sbh = dfibh.ebh = dbh;
264 while ( (f_pos < size) )
266 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
267 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
268 if (!sfi)
270 udf_release_data(dbh);
271 return NULL;
273 UDF_I_ALLOCTYPE(inode) = alloctype;
274 sfi->descTag.tagLocation = cpu_to_le32(*block);
275 dfibh.soffset = dfibh.eoffset;
276 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
277 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
278 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
279 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
281 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
282 udf_release_data(dbh);
283 return NULL;
286 mark_buffer_dirty_inode(dbh, inode);
288 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
289 UDF_I_LENALLOC(inode) = 0;
290 bloc = UDF_I_LOCATION(inode);
291 eloc.logicalBlockNum = *block;
292 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
293 elen = inode->i_size;
294 UDF_I_LENEXTENTS(inode) = elen;
295 extoffset = udf_file_entry_alloc_offset(inode);
296 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
297 /* UniqueID stuff */
299 udf_release_data(sbh);
300 mark_inode_dirty(inode);
301 return dbh;
304 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
306 int err, new;
307 struct buffer_head *bh;
308 unsigned long phys;
310 if (!create)
312 phys = udf_block_map(inode, block);
313 if (phys)
314 map_bh(bh_result, inode->i_sb, phys);
315 return 0;
318 err = -EIO;
319 new = 0;
320 bh = NULL;
322 lock_kernel();
324 if (block < 0)
325 goto abort_negative;
327 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
329 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
330 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
333 err = 0;
335 bh = inode_getblk(inode, block, &err, &phys, &new);
336 if (bh)
337 BUG();
338 if (err)
339 goto abort;
340 if (!phys)
341 BUG();
343 if (new)
344 set_buffer_new(bh_result);
345 map_bh(bh_result, inode->i_sb, phys);
346 abort:
347 unlock_kernel();
348 return err;
350 abort_negative:
351 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
352 goto abort;
355 static struct buffer_head *
356 udf_getblk(struct inode *inode, long block, int create, int *err)
358 struct buffer_head dummy;
360 dummy.b_state = 0;
361 dummy.b_blocknr = -1000;
362 *err = udf_get_block(inode, block, &dummy, create);
363 if (!*err && buffer_mapped(&dummy))
365 struct buffer_head *bh;
366 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
367 if (buffer_new(&dummy))
369 lock_buffer(bh);
370 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
371 set_buffer_uptodate(bh);
372 unlock_buffer(bh);
373 mark_buffer_dirty_inode(bh, inode);
375 return bh;
377 return NULL;
380 static struct buffer_head * inode_getblk(struct inode * inode, long block,
381 int *err, long *phys, int *new)
383 struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
384 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
385 uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
386 int count = 0, startnum = 0, endnum = 0;
387 uint32_t elen = 0;
388 kernel_lb_addr eloc, pbloc, cbloc, nbloc;
389 int c = 1;
390 uint64_t lbcount = 0, b_off = 0;
391 uint32_t newblocknum, newblock, offset = 0;
392 int8_t etype;
393 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
394 char lastblock = 0;
396 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
397 b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
398 pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
400 /* find the extent which contains the block we are looking for.
401 alternate between laarr[0] and laarr[1] for locations of the
402 current extent, and the previous extent */
405 if (pbh != cbh)
407 udf_release_data(pbh);
408 atomic_inc(&cbh->b_count);
409 pbh = cbh;
411 if (cbh != nbh)
413 udf_release_data(cbh);
414 atomic_inc(&nbh->b_count);
415 cbh = nbh;
418 lbcount += elen;
420 pbloc = cbloc;
421 cbloc = nbloc;
423 pextoffset = cextoffset;
424 cextoffset = nextoffset;
426 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
427 break;
429 c = !c;
431 laarr[c].extLength = (etype << 30) | elen;
432 laarr[c].extLocation = eloc;
434 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
435 pgoal = eloc.logicalBlockNum +
436 ((elen + inode->i_sb->s_blocksize - 1) >>
437 inode->i_sb->s_blocksize_bits);
439 count ++;
440 } while (lbcount + elen <= b_off);
442 b_off -= lbcount;
443 offset = b_off >> inode->i_sb->s_blocksize_bits;
445 /* if the extent is allocated and recorded, return the block
446 if the extent is not a multiple of the blocksize, round up */
448 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
450 if (elen & (inode->i_sb->s_blocksize - 1))
452 elen = EXT_RECORDED_ALLOCATED |
453 ((elen + inode->i_sb->s_blocksize - 1) &
454 ~(inode->i_sb->s_blocksize - 1));
455 etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
457 udf_release_data(pbh);
458 udf_release_data(cbh);
459 udf_release_data(nbh);
460 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
461 *phys = newblock;
462 return NULL;
465 if (etype == -1)
467 endnum = startnum = ((count > 1) ? 1 : count);
468 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
470 laarr[c].extLength =
471 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
472 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
473 inode->i_sb->s_blocksize - 1) &
474 ~(inode->i_sb->s_blocksize - 1));
475 UDF_I_LENEXTENTS(inode) =
476 (UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
477 ~(inode->i_sb->s_blocksize - 1);
479 c = !c;
480 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
481 ((offset + 1) << inode->i_sb->s_blocksize_bits);
482 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
483 count ++;
484 endnum ++;
485 lastblock = 1;
487 else
488 endnum = startnum = ((count > 2) ? 2 : count);
490 /* if the current extent is in position 0, swap it with the previous */
491 if (!c && count != 1)
493 laarr[2] = laarr[0];
494 laarr[0] = laarr[1];
495 laarr[1] = laarr[2];
496 c = 1;
499 /* if the current block is located in a extent, read the next extent */
500 if (etype != -1)
502 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
504 laarr[c+1].extLength = (etype << 30) | elen;
505 laarr[c+1].extLocation = eloc;
506 count ++;
507 startnum ++;
508 endnum ++;
510 else
511 lastblock = 1;
513 udf_release_data(cbh);
514 udf_release_data(nbh);
516 /* if the current extent is not recorded but allocated, get the
517 block in the extent corresponding to the requested block */
518 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
519 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
520 else /* otherwise, allocate a new block */
522 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
523 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
525 if (!goal)
527 if (!(goal = pgoal))
528 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
531 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
532 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
534 udf_release_data(pbh);
535 *err = -ENOSPC;
536 return NULL;
538 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
541 /* if the extent the requsted block is located in contains multiple blocks,
542 split the extent into at most three extents. blocks prior to requested
543 block, requested block, and blocks after requested block */
544 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
546 #ifdef UDF_PREALLOCATE
547 /* preallocate blocks */
548 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
549 #endif
551 /* merge any continuous blocks in laarr */
552 udf_merge_extents(inode, laarr, &endnum);
554 /* write back the new extents, inserting new extents if the new number
555 of extents is greater than the old number, and deleting extents if
556 the new number of extents is less than the old number */
557 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
559 udf_release_data(pbh);
561 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
562 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
564 return NULL;
566 *phys = newblock;
567 *err = 0;
568 *new = 1;
569 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
570 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
571 inode->i_ctime = CURRENT_TIME;
573 if (IS_SYNC(inode))
574 udf_sync_inode(inode);
575 else
576 mark_inode_dirty(inode);
577 return result;
580 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
581 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
583 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
584 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
586 int curr = *c;
587 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
588 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
589 int8_t etype = (laarr[curr].extLength >> 30);
591 if (blen == 1)
593 else if (!offset || blen == offset + 1)
595 laarr[curr+2] = laarr[curr+1];
596 laarr[curr+1] = laarr[curr];
598 else
600 laarr[curr+3] = laarr[curr+1];
601 laarr[curr+2] = laarr[curr+1] = laarr[curr];
604 if (offset)
606 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
608 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
609 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
610 (offset << inode->i_sb->s_blocksize_bits);
611 laarr[curr].extLocation.logicalBlockNum = 0;
612 laarr[curr].extLocation.partitionReferenceNum = 0;
614 else
615 laarr[curr].extLength = (etype << 30) |
616 (offset << inode->i_sb->s_blocksize_bits);
617 curr ++;
618 (*c) ++;
619 (*endnum) ++;
622 laarr[curr].extLocation.logicalBlockNum = newblocknum;
623 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
624 laarr[curr].extLocation.partitionReferenceNum =
625 UDF_I_LOCATION(inode).partitionReferenceNum;
626 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
627 inode->i_sb->s_blocksize;
628 curr ++;
630 if (blen != offset + 1)
632 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
633 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
634 laarr[curr].extLength = (etype << 30) |
635 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
636 curr ++;
637 (*endnum) ++;
642 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
643 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
645 int start, length = 0, currlength = 0, i;
647 if (*endnum >= (c+1))
649 if (!lastblock)
650 return;
651 else
652 start = c;
654 else
656 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
658 start = c+1;
659 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
660 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
662 else
663 start = c;
666 for (i=start+1; i<=*endnum; i++)
668 if (i == *endnum)
670 if (lastblock)
671 length += UDF_DEFAULT_PREALLOC_BLOCKS;
673 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
674 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
675 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
676 else
677 break;
680 if (length)
682 int next = laarr[start].extLocation.logicalBlockNum +
683 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
684 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
685 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
686 laarr[start].extLocation.partitionReferenceNum,
687 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
688 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
690 if (numalloc)
692 if (start == (c+1))
693 laarr[start].extLength +=
694 (numalloc << inode->i_sb->s_blocksize_bits);
695 else
697 memmove(&laarr[c+2], &laarr[c+1],
698 sizeof(long_ad) * (*endnum - (c+1)));
699 (*endnum) ++;
700 laarr[c+1].extLocation.logicalBlockNum = next;
701 laarr[c+1].extLocation.partitionReferenceNum =
702 laarr[c].extLocation.partitionReferenceNum;
703 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
704 (numalloc << inode->i_sb->s_blocksize_bits);
705 start = c+1;
708 for (i=start+1; numalloc && i<*endnum; i++)
710 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
711 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
713 if (elen > numalloc)
715 laarr[i].extLength -=
716 (numalloc << inode->i_sb->s_blocksize_bits);
717 numalloc = 0;
719 else
721 numalloc -= elen;
722 if (*endnum > (i+1))
723 memmove(&laarr[i], &laarr[i+1],
724 sizeof(long_ad) * (*endnum - (i+1)));
725 i --;
726 (*endnum) --;
729 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
734 static void udf_merge_extents(struct inode *inode,
735 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
737 int i;
739 for (i=0; i<(*endnum-1); i++)
741 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
743 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
744 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
745 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
746 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
748 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
749 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
750 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
752 laarr[i+1].extLength = (laarr[i+1].extLength -
753 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
754 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
755 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
756 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
757 laarr[i+1].extLocation.logicalBlockNum =
758 laarr[i].extLocation.logicalBlockNum +
759 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
760 inode->i_sb->s_blocksize_bits);
762 else
764 laarr[i].extLength = laarr[i+1].extLength +
765 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
766 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
767 if (*endnum > (i+2))
768 memmove(&laarr[i+1], &laarr[i+2],
769 sizeof(long_ad) * (*endnum - (i+2)));
770 i --;
771 (*endnum) --;
775 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
776 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
778 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
779 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
780 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
781 laarr[i].extLocation.logicalBlockNum = 0;
782 laarr[i].extLocation.partitionReferenceNum = 0;
784 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
785 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
786 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
788 laarr[i+1].extLength = (laarr[i+1].extLength -
789 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
790 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
791 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
792 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
794 else
796 laarr[i].extLength = laarr[i+1].extLength +
797 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
798 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
799 if (*endnum > (i+2))
800 memmove(&laarr[i+1], &laarr[i+2],
801 sizeof(long_ad) * (*endnum - (i+2)));
802 i --;
803 (*endnum) --;
806 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
808 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
809 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
810 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
811 laarr[i].extLocation.logicalBlockNum = 0;
812 laarr[i].extLocation.partitionReferenceNum = 0;
813 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
814 EXT_NOT_RECORDED_NOT_ALLOCATED;
819 static void udf_update_extents(struct inode *inode,
820 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
821 kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
823 int start = 0, i;
824 kernel_lb_addr tmploc;
825 uint32_t tmplen;
827 if (startnum > endnum)
829 for (i=0; i<(startnum-endnum); i++)
831 udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
832 laarr[i].extLength, *pbh);
835 else if (startnum < endnum)
837 for (i=0; i<(endnum-startnum); i++)
839 udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
840 laarr[i].extLength, *pbh);
841 udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
842 &laarr[i].extLength, pbh, 1);
843 start ++;
847 for (i=start; i<endnum; i++)
849 udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
850 udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
851 laarr[i].extLength, *pbh, 1);
855 struct buffer_head * udf_bread(struct inode * inode, int block,
856 int create, int * err)
858 struct buffer_head * bh = NULL;
860 bh = udf_getblk(inode, block, create, err);
861 if (!bh)
862 return NULL;
864 if (buffer_uptodate(bh))
865 return bh;
866 ll_rw_block(READ, 1, &bh);
867 wait_on_buffer(bh);
868 if (buffer_uptodate(bh))
869 return bh;
870 brelse(bh);
871 *err = -EIO;
872 return NULL;
875 void udf_truncate(struct inode * inode)
877 int offset;
878 int err;
880 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
881 S_ISLNK(inode->i_mode)))
882 return;
883 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
884 return;
886 lock_kernel();
887 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
889 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
890 inode->i_size))
892 udf_expand_file_adinicb(inode, inode->i_size, &err);
893 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
895 inode->i_size = UDF_I_LENALLOC(inode);
896 unlock_kernel();
897 return;
899 else
900 udf_truncate_extents(inode);
902 else
904 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
905 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
906 UDF_I_LENALLOC(inode) = inode->i_size;
909 else
911 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
912 udf_truncate_extents(inode);
915 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
916 if (IS_SYNC(inode))
917 udf_sync_inode (inode);
918 else
919 mark_inode_dirty(inode);
920 unlock_kernel();
924 * udf_read_inode
926 * PURPOSE
927 * Read an inode.
929 * DESCRIPTION
930 * This routine is called by iget() [which is called by udf_iget()]
931 * (clean_inode() will have been called first)
932 * when an inode is first read into memory.
934 * HISTORY
935 * July 1, 1997 - Andrew E. Mileski
936 * Written, tested, and released.
938 * 12/19/98 dgb Updated to fix size problems.
941 void
942 udf_read_inode(struct inode *inode)
944 memset(&UDF_I_LOCATION(inode), 0xFF, sizeof(kernel_lb_addr));
947 static void
948 __udf_read_inode(struct inode *inode)
950 struct buffer_head *bh = NULL;
951 struct fileEntry *fe;
952 uint16_t ident;
955 * Set defaults, but the inode is still incomplete!
956 * Note: get_new_inode() sets the following on a new inode:
957 * i_sb = sb
958 * i_no = ino
959 * i_flags = sb->s_flags
960 * i_state = 0
961 * clean_inode(): zero fills and sets
962 * i_count = 1
963 * i_nlink = 1
964 * i_op = NULL;
966 inode->i_blksize = PAGE_SIZE;
968 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
970 if (!bh)
972 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
973 inode->i_ino);
974 make_bad_inode(inode);
975 return;
978 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
979 ident != TAG_IDENT_USE)
981 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
982 inode->i_ino, ident);
983 udf_release_data(bh);
984 make_bad_inode(inode);
985 return;
988 fe = (struct fileEntry *)bh->b_data;
990 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
992 struct buffer_head *ibh = NULL, *nbh = NULL;
993 struct indirectEntry *ie;
995 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
996 if (ident == TAG_IDENT_IE)
998 if (ibh)
1000 kernel_lb_addr loc;
1001 ie = (struct indirectEntry *)ibh->b_data;
1003 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1005 if (ie->indirectICB.extLength &&
1006 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
1008 if (ident == TAG_IDENT_FE ||
1009 ident == TAG_IDENT_EFE)
1011 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
1012 udf_release_data(bh);
1013 udf_release_data(ibh);
1014 udf_release_data(nbh);
1015 __udf_read_inode(inode);
1016 return;
1018 else
1020 udf_release_data(nbh);
1021 udf_release_data(ibh);
1024 else
1025 udf_release_data(ibh);
1028 else
1029 udf_release_data(ibh);
1031 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
1033 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1034 le16_to_cpu(fe->icbTag.strategyType));
1035 udf_release_data(bh);
1036 make_bad_inode(inode);
1037 return;
1039 udf_fill_inode(inode, bh);
1040 udf_release_data(bh);
1043 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1045 struct fileEntry *fe;
1046 struct extendedFileEntry *efe;
1047 time_t convtime;
1048 long convtime_usec;
1049 int offset;
1051 fe = (struct fileEntry *)bh->b_data;
1052 efe = (struct extendedFileEntry *)bh->b_data;
1054 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1055 UDF_I_STRAT4096(inode) = 0;
1056 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1057 UDF_I_STRAT4096(inode) = 1;
1059 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1060 UDF_I_UNIQUE(inode) = 0;
1061 UDF_I_LENEATTR(inode) = 0;
1062 UDF_I_LENEXTENTS(inode) = 0;
1063 UDF_I_LENALLOC(inode) = 0;
1064 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1065 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1066 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1068 UDF_I_EFE(inode) = 1;
1069 UDF_I_USE(inode) = 0;
1070 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1071 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1073 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1075 UDF_I_EFE(inode) = 0;
1076 UDF_I_USE(inode) = 0;
1077 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1078 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1080 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1082 UDF_I_EFE(inode) = 0;
1083 UDF_I_USE(inode) = 1;
1084 UDF_I_LENALLOC(inode) =
1085 le32_to_cpu(
1086 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1087 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1088 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1089 return;
1092 inode->i_uid = le32_to_cpu(fe->uid);
1093 if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1095 inode->i_gid = le32_to_cpu(fe->gid);
1096 if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1098 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1099 if (!inode->i_nlink)
1100 inode->i_nlink = 1;
1102 inode->i_size = le64_to_cpu(fe->informationLength);
1103 UDF_I_LENEXTENTS(inode) = inode->i_size;
1105 inode->i_mode = udf_convert_permissions(fe);
1106 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1108 if (UDF_I_EFE(inode) == 0)
1110 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1111 (inode->i_sb->s_blocksize_bits - 9);
1113 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1114 lets_to_cpu(fe->accessTime)) )
1116 inode->i_atime.tv_sec = convtime;
1117 inode->i_atime.tv_nsec = convtime_usec * 1000;
1119 else
1121 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1124 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1125 lets_to_cpu(fe->modificationTime)) )
1127 inode->i_mtime.tv_sec = convtime;
1128 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1130 else
1132 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1135 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1136 lets_to_cpu(fe->attrTime)) )
1138 inode->i_ctime.tv_sec = convtime;
1139 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1141 else
1143 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1146 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1147 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1148 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1149 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1151 else
1153 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1154 (inode->i_sb->s_blocksize_bits - 9);
1156 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1157 lets_to_cpu(efe->accessTime)) )
1159 inode->i_atime.tv_sec = convtime;
1160 inode->i_atime.tv_nsec = convtime_usec * 1000;
1162 else
1164 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1167 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1168 lets_to_cpu(efe->modificationTime)) )
1170 inode->i_mtime.tv_sec = convtime;
1171 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1173 else
1175 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1178 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1179 lets_to_cpu(efe->createTime)) )
1181 UDF_I_CRTIME(inode).tv_sec = convtime;
1182 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1184 else
1186 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1189 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1190 lets_to_cpu(efe->attrTime)) )
1192 inode->i_ctime.tv_sec = convtime;
1193 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1195 else
1197 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1200 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1201 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1202 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1203 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1206 switch (fe->icbTag.fileType)
1208 case ICBTAG_FILE_TYPE_DIRECTORY:
1210 inode->i_op = &udf_dir_inode_operations;
1211 inode->i_fop = &udf_dir_operations;
1212 inode->i_mode |= S_IFDIR;
1213 inode->i_nlink ++;
1214 break;
1216 case ICBTAG_FILE_TYPE_REALTIME:
1217 case ICBTAG_FILE_TYPE_REGULAR:
1218 case ICBTAG_FILE_TYPE_UNDEF:
1220 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1221 inode->i_data.a_ops = &udf_adinicb_aops;
1222 else
1223 inode->i_data.a_ops = &udf_aops;
1224 inode->i_op = &udf_file_inode_operations;
1225 inode->i_fop = &udf_file_operations;
1226 inode->i_mode |= S_IFREG;
1227 break;
1229 case ICBTAG_FILE_TYPE_BLOCK:
1231 inode->i_mode |= S_IFBLK;
1232 break;
1234 case ICBTAG_FILE_TYPE_CHAR:
1236 inode->i_mode |= S_IFCHR;
1237 break;
1239 case ICBTAG_FILE_TYPE_FIFO:
1241 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1242 break;
1244 case ICBTAG_FILE_TYPE_SOCKET:
1246 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1247 break;
1249 case ICBTAG_FILE_TYPE_SYMLINK:
1251 inode->i_data.a_ops = &udf_symlink_aops;
1252 inode->i_op = &page_symlink_inode_operations;
1253 inode->i_mode = S_IFLNK|S_IRWXUGO;
1254 break;
1256 default:
1258 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1259 inode->i_ino, fe->icbTag.fileType);
1260 make_bad_inode(inode);
1261 return;
1264 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1266 struct deviceSpec *dsea =
1267 (struct deviceSpec *)
1268 udf_get_extendedattr(inode, 12, 1);
1270 if (dsea)
1272 init_special_inode(inode, inode->i_mode, MKDEV(
1273 le32_to_cpu(dsea->majorDeviceIdent),
1274 le32_to_cpu(dsea->minorDeviceIdent)));
1275 /* Developer ID ??? */
1277 else
1279 make_bad_inode(inode);
1284 static mode_t
1285 udf_convert_permissions(struct fileEntry *fe)
1287 mode_t mode;
1288 uint32_t permissions;
1289 uint32_t flags;
1291 permissions = le32_to_cpu(fe->permissions);
1292 flags = le16_to_cpu(fe->icbTag.flags);
1294 mode = (( permissions ) & S_IRWXO) |
1295 (( permissions >> 2 ) & S_IRWXG) |
1296 (( permissions >> 4 ) & S_IRWXU) |
1297 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1298 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1299 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1301 return mode;
1305 * udf_write_inode
1307 * PURPOSE
1308 * Write out the specified inode.
1310 * DESCRIPTION
1311 * This routine is called whenever an inode is synced.
 *	It serializes on the big kernel lock and delegates the real work
 *	to udf_update_inode().
1314 * HISTORY
1315 * July 1, 1997 - Andrew E. Mileski
1316 * Written, tested, and released.
/*
 * Write back the on-disc representation of @inode.  Called by the VFS
 * during writeback/sync; returns 0 on success or -EIO from
 * udf_update_inode() on buffer I/O failure.
 */
int udf_write_inode(struct inode * inode, int sync)
{
	int ret;

	/* udf_update_inode() walks shared on-disc state; this tree
	 * serializes filesystem work under the big kernel lock */
	lock_kernel();
	ret = udf_update_inode(inode, sync);
	unlock_kernel();

	return ret;
}
/*
 * Synchronously write @inode to disc (do_sync == 1).  Unlike
 * udf_write_inode() this does not take the BKL itself; callers are
 * expected to provide the necessary serialization.
 */
int udf_sync_inode(struct inode * inode)
{
	return udf_update_inode(inode, 1);
}
/*
 * udf_update_inode
 *
 * Serialize the in-core inode into its on-disc (extended) file entry:
 * read the descriptor block, rebuild it from scratch, recompute the CRC
 * and tag checksum, and mark (or synchronously write) the buffer.
 * An Unallocated Space Entry descriptor is handled and returned early.
 *
 * Returns 0 on success, -EIO if the descriptor block cannot be read or
 * (with @do_sync) fails to write back.
 */
static int
udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int i;
	kernel_timestamp cpu_time;
	int err = 0;

	bh = udf_tread(inode->i_sb,
		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));

	if (!bh)
	{
		udf_debug("bread failure\n");
		return -EIO;
	}

	/* rebuild the descriptor from scratch; note this clears the old
	 * tagIdent, so it must be inspected (below) before the memset...
	 * here fe/efe alias the same zeroed block */
	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
	{
		/* Unallocated Space Entry: only allocation descriptors and
		 * the tag need to be written; no times/permissions exist */
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
			sizeof(tag);
		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
		use->descTag.descCRCLength = cpu_to_le16(crclen);
		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));

		/* tag checksum: byte sum of the tag, skipping the checksum
		 * byte itself (offset 4) */
		use->descTag.tagChecksum = 0;
		for (i=0; i<16; i++)
			if (i != 4)
				use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];

		mark_buffer_dirty(bh);
		udf_release_data(bh);
		return err;
	}

	/* uid/gid of -1 (the mount-default markers) are left as zero from
	 * the memset rather than written back */
	if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
		fe->uid = cpu_to_le32(inode->i_uid);

	if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
		fe->gid = cpu_to_le32(inode->i_gid);

	/* inverse of udf_convert_permissions(): spread rwx into the UDF
	 * other/group/owner fields, preserving the delete/chattr bits */
	udfperms =	((inode->i_mode & S_IRWXO)     ) |
			((inode->i_mode & S_IRWXG) << 2) |
			((inode->i_mode & S_IRWXU) << 4);

	udfperms |=	(le32_to_cpu(fe->permissions) &
			(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
			 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
			 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
	fe->permissions = cpu_to_le32(udfperms);

	/* directories store the link count without the "." self-reference */
	if (S_ISDIR(inode->i_mode))
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
	{
		/* device nodes keep major/minor in a deviceSpec extended
		 * attribute (type 12); create it if not present yet */
		regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)
				udf_get_extendedattr(inode, 12, 1);

		if (!dsea)
		{
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
					sizeof(struct deviceSpec) +
					sizeof(regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
				sizeof(regid));
			dsea->impUseLength = cpu_to_le32(sizeof(regid));
		}
		eid = (regid *)dsea->impUse;
		memset(eid, 0, sizeof(regid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (UDF_I_EFE(inode) == 0)
	{
		/* plain File Entry */
		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		/* i_blocks is in 512-byte sectors; round up to fs blocks */
		fe->logicalBlocksRecorded = cpu_to_le64(
			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
			(inode->i_sb->s_blocksize_bits - 9));

		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
			fe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
			fe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
			fe->attrTime = cpu_to_lets(cpu_time);
		memset(&(fe->impIdent), 0, sizeof(regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	}
	else
	{
		/* Extended File Entry: additionally carries objectSize and
		 * a creation time, which must never postdate a/m/ctime */
		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
		efe->objectSize = cpu_to_le64(inode->i_size);
		efe->logicalBlocksRecorded = cpu_to_le64(
			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
			(inode->i_sb->s_blocksize_bits - 9));

		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_atime;
		}
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_mtime;
		}
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_ctime;
		}

		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
			efe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
			efe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
			efe->createTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
			efe->attrTime = cpu_to_lets(cpu_time);

		memset(&(efe->impIdent), 0, sizeof(regid));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}
	if (UDF_I_STRAT4096(inode))
	{
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	}
	else
	{
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	/* map the POSIX file type back to the ICB tag file type */
	if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	/* preserve existing flag bits apart from alloc type and the three
	 * permission-related flags we regenerate from i_mode */
	icbflags =	UDF_I_ALLOCTYPE(inode) |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));

	/* tag checksum: byte sum of the tag, skipping offset 4 (itself) */
	fe->descTag.tagChecksum = 0;
	for (i=0; i<16; i++)
		if (i != 4)
			fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync)
	{
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh))
		{
			printk("IO error syncing udf inode [%s:%08lx]\n",
				inode->i_sb->s_id, inode->i_ino);
			err = -EIO;
		}
	}
	udf_release_data(bh);
	return err;
}
1571 * udf_iget
1573 * PURPOSE
1574 * Get an inode.
1576 * DESCRIPTION
1577 * This routine replaces iget() and read_inode().
1579 * HISTORY
1580 * October 3, 1997 - Andrew E. Mileski
1581 * Written, tested, and released.
1583 * 12/19/98 dgb Added semaphore and changed to be a wrapper of iget
1585 struct inode *
1586 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1588 struct inode *inode;
1589 unsigned long block;
1591 block = udf_get_lb_pblock(sb, ino, 0);
1593 /* Get the inode */
1595 inode = iget(sb, block);
1596 /* calls udf_read_inode() ! */
1598 if (!inode)
1600 printk(KERN_ERR "udf: iget() failed\n");
1601 return NULL;
1603 else if (is_bad_inode(inode))
1605 iput(inode);
1606 return NULL;
1608 else if (UDF_I_LOCATION(inode).logicalBlockNum == 0xFFFFFFFF &&
1609 UDF_I_LOCATION(inode).partitionReferenceNum == 0xFFFF)
1611 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1612 __udf_read_inode(inode);
1613 if (is_bad_inode(inode))
1615 iput(inode);
1616 return NULL;
1620 if ( ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum) )
1622 udf_debug("block=%d, partition=%d out of range\n",
1623 ino.logicalBlockNum, ino.partitionReferenceNum);
1624 make_bad_inode(inode);
1625 iput(inode);
1626 return NULL;
1629 return inode;
/*
 * udf_add_aext
 *
 * Append the extent (@eloc, @elen) at position (*bloc, *extoffset).  If
 * the current descriptor area (inline ICB data when *bh is NULL, else the
 * allocation extent block in *bh) cannot hold two more descriptors, a new
 * Allocation Extent Descriptor block is allocated, chained in via an
 * EXT_NEXT_EXTENT_ALLOCDECS descriptor, and *bh/*bloc/*extoffset are
 * advanced into it.  Returns the extent type (elen >> 30) or -1 on error.
 */
int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
{
	int adsize;
	short_ad *sad = NULL;
	long_ad *lad = NULL;
	struct allocExtDesc *aed;
	int8_t etype;
	uint8_t *ptr;

	/* descriptors either live inline in the in-core file entry data,
	 * or in an allocation extent block held by *bh */
	if (!*bh)
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
		ptr = (*bh)->b_data + *extoffset;

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return -1;

	/* room needed for the new descriptor plus a possible chain pointer */
	if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
	{
		char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		kernel_lb_addr obloc = *bloc;

		if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
		{
			return -1;
		}
		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
			*bloc, 0))))
		{
			return -1;
		}
		lock_buffer(nbh);
		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(nbh);
		unlock_buffer(nbh);
		mark_buffer_dirty_inode(nbh, inode);

		aed = (struct allocExtDesc *)(nbh->b_data);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
		if (*extoffset + adsize > inode->i_sb->s_blocksize)
		{
			/* no room even for the chain pointer: move the last
			 * descriptor into the new block and overwrite its old
			 * slot (sptr) with the chain pointer below */
			loffset = *extoffset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = ptr - adsize;
			dptr = nbh->b_data + sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			*extoffset = sizeof(struct allocExtDesc) + adsize;
		}
		else
		{
			/* the chain pointer itself still fits here */
			loffset = *extoffset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = ptr;
			*extoffset = sizeof(struct allocExtDesc);

			if (*bh)
			{
				aed = (struct allocExtDesc *)(*bh)->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
			}
			else
			{
				UDF_I_LENALLOC(inode) += adsize;
				mark_inode_dirty(inode);
			}
		}
		/* descriptor tag version 3 from UDF 2.00 onwards */
		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
				bloc->logicalBlockNum, sizeof(tag));
		else
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
				bloc->logicalBlockNum, sizeof(tag));
		/* write the EXT_NEXT_EXTENT_ALLOCDECS pointer at sptr */
		switch (UDF_I_ALLOCTYPE(inode))
		{
			case ICBTAG_FLAG_AD_SHORT:
			{
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
				break;
			}
			case ICBTAG_FLAG_AD_LONG:
			{
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(*bloc);
				memset(lad->impUse, 0x00, sizeof(lad->impUse));
				break;
			}
		}
		if (*bh)
		{
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((*bh)->b_data, loffset);
			else
				udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(*bh, inode);
			udf_release_data(*bh);
		}
		else
			mark_inode_dirty(inode);
		/* hand the caller the new allocation extent block */
		*bh = nbh;
	}

	etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);

	/* account for the descriptor just written */
	if (!*bh)
	{
		UDF_I_LENALLOC(inode) += adsize;
		mark_inode_dirty(inode);
	}
	else
	{
		aed = (struct allocExtDesc *)(*bh)->b_data;
		aed->lengthAllocDescs =
			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
			udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
		else
			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(*bh, inode);
	}

	return etype;
}
1772 int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
1773 kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
1775 int adsize;
1776 uint8_t *ptr;
1778 if (!bh)
1779 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1780 else
1782 ptr = bh->b_data + *extoffset;
1783 atomic_inc(&bh->b_count);
1786 switch (UDF_I_ALLOCTYPE(inode))
1788 case ICBTAG_FLAG_AD_SHORT:
1790 short_ad *sad = (short_ad *)ptr;
1791 sad->extLength = cpu_to_le32(elen);
1792 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1793 adsize = sizeof(short_ad);
1794 break;
1796 case ICBTAG_FLAG_AD_LONG:
1798 long_ad *lad = (long_ad *)ptr;
1799 lad->extLength = cpu_to_le32(elen);
1800 lad->extLocation = cpu_to_lelb(eloc);
1801 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1802 adsize = sizeof(long_ad);
1803 break;
1805 default:
1806 return -1;
1809 if (bh)
1811 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1813 struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1814 udf_update_tag((bh)->b_data,
1815 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1817 mark_buffer_dirty_inode(bh, inode);
1818 udf_release_data(bh);
1820 else
1821 mark_inode_dirty(inode);
1823 if (inc)
1824 *extoffset += adsize;
1825 return (elen >> 30);
1828 int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1829 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1831 int8_t etype;
1833 while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
1834 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1836 *bloc = *eloc;
1837 *extoffset = sizeof(struct allocExtDesc);
1838 udf_release_data(*bh);
1839 if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1841 udf_debug("reading block %d failed!\n",
1842 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1843 return -1;
1847 return etype;
/*
 * udf_current_aext
 *
 * Decode the extent descriptor at *extoffset into (*eloc, *elen) and
 * return its type (the top two bits of the extent length), advancing
 * *extoffset when @inc is set.  A zero *extoffset is initialized to the
 * first descriptor.  Returns -1 past the end of the descriptor area or
 * for an unsupported allocation type.
 */
int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;

	if (!*bh)
	{
		/* descriptors inline in the in-core file entry data */
		if (!(*extoffset))
			*extoffset = udf_file_entry_alloc_offset(inode);
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
	}
	else
	{
		/* descriptors in an allocation extent block */
		if (!(*extoffset))
			*extoffset = sizeof(struct allocExtDesc);
		ptr = (*bh)->b_data + *extoffset;
		alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
	}

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad;

			if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(sad->extLength) >> 30;
			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
			/* short_ads are relative to the inode's partition */
			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad;

			if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(lad->extLength) >> 30;
			*eloc = lelb_to_cpu(lad->extLocation);
			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		default:
		{
			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
			return -1;
		}
	}

	return etype;
}
1909 static int8_t
1910 udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
1911 kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
1913 kernel_lb_addr oeloc;
1914 uint32_t oelen;
1915 int8_t etype;
1917 if (bh)
1918 atomic_inc(&bh->b_count);
1920 while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1922 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
1924 neloc = oeloc;
1925 nelen = (etype << 30) | oelen;
1927 udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1928 udf_release_data(bh);
1929 return (nelen >> 30);
/*
 * udf_delete_aext
 *
 * Remove the extent descriptor at (@nbloc, @nextoffset) by copying every
 * subsequent descriptor one slot backwards ("o" = write cursor, "n" =
 * read cursor), then blanking the freed slot(s).  If the last descriptor
 * of an allocation extent block is consumed, that block itself is freed
 * and the chain pointer to it is blanked as well (hence two slots and
 * the 2*adsize accounting).  Returns the type of the blanked extent
 * (always 0 here, since elen is zeroed) or -1 on failure.
 */
int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
{
	struct buffer_head *obh;
	kernel_lb_addr obloc;
	int oextoffset, adsize;
	int8_t etype;
	struct allocExtDesc *aed;

	if (nbh)
	{
		/* obh and nbh both start out referring to this buffer and
		 * are each released once at the end, so take two refs */
		atomic_inc(&nbh->b_count);
		atomic_inc(&nbh->b_count);
	}

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		adsize = 0;

	obh = nbh;
	obloc = nbloc;
	oextoffset = nextoffset;

	/* step the read cursor past the descriptor being deleted */
	if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
		return -1;

	while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
	{
		udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
		if (obh != nbh)
		{
			/* read cursor moved into the next alloc extent block;
			 * drag the write cursor along behind it */
			obloc = nbloc;
			udf_release_data(obh);
			atomic_inc(&nbh->b_count);
			obh = nbh;
			oextoffset = nextoffset - adsize;
		}
	}
	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
	elen = 0;

	if (nbh != obh)
	{
		/* the trailing alloc extent block is now empty: free it and
		 * blank both the moved descriptor and the chain pointer */
		udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= (adsize * 2);
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}
	else
	{
		/* only one slot freed: blank it and shrink the accounting */
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= adsize;
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - adsize);
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}

	udf_release_data(nbh);
	udf_release_data(obh);
	return (elen >> 30);
}
2024 int8_t inode_bmap(struct inode *inode, int block, kernel_lb_addr *bloc, uint32_t *extoffset,
2025 kernel_lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
2027 uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
2028 int8_t etype;
2030 if (block < 0)
2032 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
2033 return -1;
2035 if (!inode)
2037 printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
2038 return -1;
2041 *extoffset = 0;
2042 *elen = 0;
2043 *bloc = UDF_I_LOCATION(inode);
2047 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
2049 *offset = bcount - lbcount;
2050 UDF_I_LENEXTENTS(inode) = lbcount;
2051 return -1;
2053 lbcount += *elen;
2054 } while (lbcount <= bcount);
2056 *offset = bcount + *elen - lbcount;
2058 return etype;
2061 long udf_block_map(struct inode *inode, long block)
2063 kernel_lb_addr eloc, bloc;
2064 uint32_t offset, extoffset, elen;
2065 struct buffer_head *bh = NULL;
2066 int ret;
2068 lock_kernel();
2070 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
2071 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
2072 else
2073 ret = 0;
2075 unlock_kernel();
2076 udf_release_data(bh);
2078 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2079 return udf_fixed_to_variable(ret);
2080 else
2081 return ret;