1 /*
2 * inode.c
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
7 * CONTACTS
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hootie.lvld.hp.com
12 * COPYRIGHT
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2000 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
22 * HISTORY
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
36 #include "udfdecl.h"
37 #include <linux/locks.h>
38 #include <linux/mm.h>
39 #include <linux/smp_lock.h>
41 #include "udf_i.h"
42 #include "udf_sb.h"
44 #define EXTENT_MERGE_SIZE 5
46 static mode_t udf_convert_permissions(struct FileEntry *);
47 static int udf_update_inode(struct inode *, int);
48 static void udf_fill_inode(struct inode *, struct buffer_head *);
49 static struct buffer_head *inode_getblk(struct inode *, long, int *, long *, int *);
50 static void udf_split_extents(struct inode *, int *, int, int,
51 long_ad [EXTENT_MERGE_SIZE], int *);
52 static void udf_prealloc_extents(struct inode *, int, int,
53 long_ad [EXTENT_MERGE_SIZE], int *);
54 static void udf_merge_extents(struct inode *,
55 long_ad [EXTENT_MERGE_SIZE], int *);
56 static void udf_update_extents(struct inode *,
57 long_ad [EXTENT_MERGE_SIZE], int, int,
58 lb_addr, Uint32, struct buffer_head **);
59 static int udf_get_block(struct inode *, long, struct buffer_head *, int);
62 * udf_put_inode
64 * PURPOSE
66 * DESCRIPTION
67 * This routine is called whenever the kernel no longer needs the inode.
69 * HISTORY
70 * July 1, 1997 - Andrew E. Mileski
71 * Written, tested, and released.
73 * Called at each iput()
75 void udf_put_inode(struct inode * inode)
77 if (!(inode->i_sb->s_flags & MS_RDONLY))
79 lock_kernel();
80 udf_discard_prealloc(inode);
81 /* write the root inode on put, if dirty */
82 if (!inode->i_sb->s_root && inode->i_state & I_DIRTY)
83 udf_update_inode(inode, IS_SYNC(inode));
84 unlock_kernel();
89 * udf_delete_inode
91 * PURPOSE
92 * Clean-up before the specified inode is destroyed.
94 * DESCRIPTION
95 * This routine is called when the kernel destroys an inode structure
96 * ie. when iput() finds i_count == 0.
98 * HISTORY
99 * July 1, 1997 - Andrew E. Mileski
100 * Written, tested, and released.
102 * Called at the last iput() if i_nlink is zero.
104 void udf_delete_inode(struct inode * inode)
106 lock_kernel();
108 if (is_bad_inode(inode))
110 clear_inode(inode);
111 goto out;
114 inode->i_size = 0;
115 udf_truncate(inode);
116 udf_update_inode(inode, IS_SYNC(inode));
117 udf_free_inode(inode);
118 out:
119 unlock_kernel();
122 void udf_discard_prealloc(struct inode * inode)
124 if (inode->i_size && UDF_I_ALLOCTYPE(inode) != ICB_FLAG_AD_IN_ICB)
125 udf_trunc(inode);
128 static int udf_writepage(struct file *file, struct page *page)
130 return block_write_full_page(page, udf_get_block);
133 static int udf_readpage(struct file *file, struct page *page)
135 return block_read_full_page(page, udf_get_block);
138 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
140 return block_prepare_write(page, from, to, udf_get_block);
143 static int udf_bmap(struct address_space *mapping, long block)
145 return generic_block_bmap(mapping,block,udf_get_block);
148 struct address_space_operations udf_aops = {
149 readpage: udf_readpage,
150 writepage: udf_writepage,
151 sync_page: block_sync_page,
152 prepare_write: udf_prepare_write,
153 commit_write: generic_commit_write,
154 bmap: udf_bmap,
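/*
 * Editor's note: all of the address_space operations above funnel block
 * mapping through udf_get_block().  A rough, illustrative sketch (not
 * driver code) of what resolving logical block N of a file boils down to:
 *
 *	long phys = udf_block_map(inode, N);
 *	if (phys)
 *		bh = getblk(inode->i_dev, phys, inode->i_sb->s_blocksize);
 */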
157 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
159 struct buffer_head *bh = NULL;
160 struct page *page;
161 unsigned long kaddr = 0;
162 int block;
164 /* from now on we have normal address_space methods */
165 inode->i_data.a_ops = &udf_aops;
167 if (!UDF_I_LENALLOC(inode))
169 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
170 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_SHORT;
171 else
172 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
173 mark_inode_dirty(inode);
174 return;
177 block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
178 bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
179 if (!bh)
180 return;
181 page = grab_cache_page(inode->i_mapping, 0);
182 if (!PageLocked(page))
183 PAGE_BUG(page);
184 if (!Page_Uptodate(page))
186 kaddr = kmap(page);
187 memset((char *)kaddr + UDF_I_LENALLOC(inode), 0x00,
188 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
189 memcpy((char *)kaddr, bh->b_data + udf_file_entry_alloc_offset(inode),
190 UDF_I_LENALLOC(inode));
191 flush_dcache_page(page);
192 SetPageUptodate(page);
193 kunmap(page);
195 memset(bh->b_data + udf_file_entry_alloc_offset(inode),
196 0, UDF_I_LENALLOC(inode));
197 UDF_I_LENALLOC(inode) = 0;
198 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
199 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_SHORT;
200 else
201 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
202 inode->i_blocks = inode->i_sb->s_blocksize / 512;
203 mark_buffer_dirty(bh);
204 udf_release_data(bh);
206 inode->i_data.a_ops->writepage(NULL, page);
207 UnlockPage(page);
208 page_cache_release(page);
210 mark_inode_dirty(inode);
211 inode->i_version ++;
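/*
 * Editor's note: udf_expand_file_adinicb() above converts a file whose data
 * is embedded in the File Entry (ICB_FLAG_AD_IN_ICB) into a normally
 * allocated file: the embedded bytes are copied into page 0 of the page
 * cache, the in-ICB area is cleared, the allocation type flips to short or
 * long allocation descriptors, and the page is written back through the
 * regular udf_aops.  For example, a 300-byte in-ICB file being grown past
 * the space left in its File Entry typically ends up backed by one newly
 * allocated block plus a zero-filled tail in page 0.
 */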
214 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
216 int newblock;
217 struct buffer_head *sbh = NULL, *dbh = NULL;
218 lb_addr bloc, eloc;
219 Uint32 elen, extoffset;
221 struct udf_fileident_bh sfibh, dfibh;
222 loff_t f_pos = udf_ext0_offset(inode) >> 2;
223 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
224 struct FileIdentDesc cfi, *sfi, *dfi;
226 if (!inode->i_size)
228 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
229 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_SHORT;
230 else
231 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
232 mark_inode_dirty(inode);
233 return NULL;
236 /* alloc block, and copy data to it */
237 *block = udf_new_block(inode,
238 UDF_I_LOCATION(inode).partitionReferenceNum,
239 UDF_I_LOCATION(inode).logicalBlockNum, err);
241 if (!(*block))
242 return NULL;
243 newblock = udf_get_pblock(inode->i_sb, *block,
244 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
245 if (!newblock)
246 return NULL;
247 sbh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
248 if (!sbh)
249 return NULL;
250 dbh = udf_tread(inode->i_sb, newblock, inode->i_sb->s_blocksize);
251 if (!dbh)
252 return NULL;
254 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
255 sfibh.sbh = sfibh.ebh = sbh;
256 dfibh.soffset = dfibh.eoffset = 0;
257 dfibh.sbh = dfibh.ebh = dbh;
258 while ( (f_pos < size) )
260 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
261 if (!sfi)
263 udf_release_data(sbh);
264 udf_release_data(dbh);
265 return NULL;
267 sfi->descTag.tagLocation = *block;
268 dfibh.soffset = dfibh.eoffset;
269 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
270 dfi = (struct FileIdentDesc *)(dbh->b_data + dfibh.soffset);
271 if (udf_write_fi(sfi, dfi, &dfibh, sfi->impUse,
272 sfi->fileIdent + sfi->lengthOfImpUse))
274 udf_release_data(sbh);
275 udf_release_data(dbh);
276 return NULL;
279 mark_buffer_dirty(dbh);
281 memset(sbh->b_data + udf_file_entry_alloc_offset(inode),
282 0, UDF_I_LENALLOC(inode));
284 UDF_I_LENALLOC(inode) = 0;
285 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
286 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_SHORT;
287 else
288 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
289 bloc = UDF_I_LOCATION(inode);
290 eloc.logicalBlockNum = *block;
291 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
292 elen = inode->i_size;
293 extoffset = udf_file_entry_alloc_offset(inode);
294 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
295 /* UniqueID stuff */
297 inode->i_blocks = inode->i_sb->s_blocksize / 512;
298 mark_buffer_dirty(sbh);
299 udf_release_data(sbh);
300 mark_inode_dirty(inode);
301 inode->i_version ++;
302 return dbh;
305 static int udf_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create)
307 int err, new;
308 struct buffer_head *bh;
309 unsigned long phys;
311 if (!create)
313 phys = udf_block_map(inode, block);
314 if (phys)
316 bh_result->b_dev = inode->i_dev;
317 bh_result->b_blocknr = phys;
318 bh_result->b_state |= (1UL << BH_Mapped);
320 return 0;
323 err = -EIO;
324 new = 0;
325 bh = NULL;
327 lock_kernel();
329 if (block < 0)
330 goto abort_negative;
332 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
334 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
335 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
338 err = 0;
340 bh = inode_getblk(inode, block, &err, &phys, &new);
341 if (bh)
342 BUG();
343 if (err)
344 goto abort;
345 if (!phys)
346 BUG();
348 bh_result->b_dev = inode->i_dev;
349 bh_result->b_blocknr = phys;
350 bh_result->b_state |= (1UL << BH_Mapped);
351 if (new)
352 bh_result->b_state |= (1UL << BH_New);
353 abort:
354 unlock_kernel();
355 return err;
357 abort_negative:
358 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
359 goto abort;
362 struct buffer_head * udf_getblk(struct inode * inode, long block,
363 int create, int * err)
365 struct buffer_head dummy;
366 int error;
368 dummy.b_state = 0;
369 dummy.b_blocknr = -1000;
370 error = udf_get_block(inode, block, &dummy, create);
371 *err = error;
372 if (!error && buffer_mapped(&dummy))
374 struct buffer_head *bh;
375 bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
376 if (buffer_new(&dummy))
378 if (!buffer_uptodate(bh))
379 wait_on_buffer(bh);
380 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
381 mark_buffer_uptodate(bh, 1);
382 mark_buffer_dirty(bh);
384 return bh;
386 return NULL;
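/*
 * Editor's sketch of the pattern used by udf_getblk() above: a throw-away
 * buffer_head is handed to udf_get_block() purely to learn the physical
 * block number, the real buffer is then fetched with getblk(), and freshly
 * allocated blocks are zero-filled before being returned.  Illustrative
 * caller fragment (never compiled, not part of the driver):
 */
#if 0
	int err;
	struct buffer_head *bh = udf_getblk(inode, block, 1, &err);
	if (!bh)
		return err;
	/* ... read or fill bh->b_data ... */
	udf_release_data(bh);
#endif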
389 static struct buffer_head * inode_getblk(struct inode * inode, long block,
390 int *err, long *phys, int *new)
392 struct buffer_head *pbh = NULL, *cbh = NULL, *result = NULL;
393 long_ad laarr[EXTENT_MERGE_SIZE];
394 Uint32 pextoffset = 0, cextoffset = 0, nextoffset = 0;
395 int count = 0, startnum = 0, endnum = 0;
396 Uint32 elen = 0;
397 lb_addr eloc, pbloc = UDF_I_LOCATION(inode), cbloc = UDF_I_LOCATION(inode);
398 int c = 1;
399 int lbcount = 0, b_off = 0, offset = 0;
400 Uint32 newblocknum, newblock;
401 int etype;
402 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
403 char lastblock = 0;
405 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
406 b_off = block << inode->i_sb->s_blocksize_bits;
407 pbloc = cbloc = UDF_I_LOCATION(inode);
409 /* find the extent which contains the block we are looking for.
410 alternate between laarr[0] and laarr[1] for locations of the
411 current extent, and the previous extent */
414 if (pbh != cbh)
416 udf_release_data(pbh);
417 pbh = cbh;
418 atomic_inc(&cbh->b_count);
419 pbloc = cbloc;
422 lbcount += elen;
424 pextoffset = cextoffset;
425 cextoffset = nextoffset;
427 if ((etype = udf_next_aext(inode, &cbloc, &nextoffset, &eloc, &elen, &cbh, 1)) == -1)
428 break;
430 c = !c;
432 laarr[c].extLength = (etype << 30) | elen;
433 laarr[c].extLocation = eloc;
435 if (etype != EXTENT_NOT_RECORDED_NOT_ALLOCATED)
436 pgoal = eloc.logicalBlockNum +
437 ((elen + inode->i_sb->s_blocksize - 1) >>
438 inode->i_sb->s_blocksize_bits);
440 count ++;
441 } while (lbcount + elen <= b_off);
443 b_off -= lbcount;
444 offset = b_off >> inode->i_sb->s_blocksize_bits;
446 /* if the extent is allocated and recorded, return the block
447 if the extent is not a multiple of the blocksize, round up */
449 if (etype == EXTENT_RECORDED_ALLOCATED)
451 if (elen & (inode->i_sb->s_blocksize - 1))
453 elen = (EXTENT_RECORDED_ALLOCATED << 30) |
454 ((elen + inode->i_sb->s_blocksize - 1) &
455 ~(inode->i_sb->s_blocksize - 1));
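/*
 * Editor's note, worked example of the rounding above: with a 2048 byte
 * block size and a recorded extent of 5000 bytes, the length is rounded up
 * to (5000 + 2047) & ~2047 = 6144 bytes and rewritten with the
 * EXTENT_RECORDED_ALLOCATED type kept in the top two bits of extLength.
 */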
456 etype = udf_write_aext(inode, cbloc, &cextoffset, eloc, elen, &cbh, 1);
458 udf_release_data(pbh);
459 udf_release_data(cbh);
460 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
461 *phys = newblock;
462 return NULL;
465 if (etype == -1)
467 endnum = startnum = ((count > 1) ? 1 : count);
468 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
470 laarr[c].extLength =
471 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
472 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
473 inode->i_sb->s_blocksize - 1) &
474 ~(inode->i_sb->s_blocksize - 1));
476 c = !c;
477 laarr[c].extLength = (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30) |
478 ((offset + 1) << inode->i_sb->s_blocksize_bits);
479 memset(&laarr[c].extLocation, 0x00, sizeof(lb_addr));
480 count ++;
481 endnum ++;
482 lastblock = 1;
484 else
485 endnum = startnum = ((count > 2) ? 2 : count);
487 /* if the current extent is in position 0, swap it with the previous */
488 if (!c && count != 1)
490 laarr[2] = laarr[0];
491 laarr[0] = laarr[1];
492 laarr[1] = laarr[2];
493 c = 1;
496 /* if the current block is located in an extent, read the next extent */
497 if (etype != -1)
499 if ((etype = udf_next_aext(inode, &cbloc, &nextoffset, &eloc, &elen, &cbh, 0)) != -1)
501 laarr[c+1].extLength = (etype << 30) | elen;
502 laarr[c+1].extLocation = eloc;
503 count ++;
504 startnum ++;
505 endnum ++;
507 else
508 lastblock = 1;
510 udf_release_data(cbh);
512 /* if the current extent is not recorded but allocated, get the
513 block in the extent corresponding to the requested block */
514 if ((laarr[c].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED)
515 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
516 else /* otherwise, allocate a new block */
518 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
519 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
521 if (!goal)
523 if (!(goal = pgoal))
524 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
527 if (!(newblocknum = udf_new_block(inode,
528 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
530 udf_release_data(pbh);
531 *err = -ENOSPC;
532 return NULL;
536 /* if the extent the requested block is located in contains multiple blocks,
537 split the extent into at most three extents. blocks prior to requested
538 block, requested block, and blocks after requested block */
539 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
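/*
 * Editor's note, worked example of the split performed above: if the block
 * being mapped is block 3 of an 8 block unrecorded extent, udf_split_extents()
 * leaves three pieces in laarr - 3 unrecorded blocks, then 1 recorded block
 * at newblocknum, then the remaining 4 unrecorded blocks - and endnum grows
 * accordingly.
 */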
541 #ifdef UDF_PREALLOCATE
542 /* preallocate blocks */
543 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
544 #endif
546 /* merge any continuous blocks in laarr */
547 udf_merge_extents(inode, laarr, &endnum);
549 /* write back the new extents, inserting new extents if the new number
550 of extents is greater than the old number, and deleting extents if
551 the new number of extents is less than the old number */
552 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
554 udf_release_data(pbh);
556 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
557 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
559 return NULL;
561 *phys = newblock;
562 *err = 0;
563 *new = 1;
564 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
565 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
566 inode->i_ctime = CURRENT_TIME;
567 UDF_I_UCTIME(inode) = CURRENT_UTIME;
568 inode->i_blocks += inode->i_sb->s_blocksize / 512;
569 if (IS_SYNC(inode))
570 udf_sync_inode(inode);
571 else
572 mark_inode_dirty(inode);
573 return result;
576 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
577 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
579 if ((laarr[*c].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED ||
580 (laarr[*c].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED)
582 int curr = *c;
583 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
584 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
585 int type = laarr[curr].extLength & ~UDF_EXTENT_LENGTH_MASK;
587 if (blen == 1)
589 else if (!offset || blen == offset + 1)
591 laarr[curr+2] = laarr[curr+1];
592 laarr[curr+1] = laarr[curr];
594 else
596 laarr[curr+3] = laarr[curr+1];
597 laarr[curr+2] = laarr[curr+1] = laarr[curr];
600 if (offset)
602 laarr[curr].extLength = type |
603 (offset << inode->i_sb->s_blocksize_bits);
604 curr ++;
605 (*c) ++;
606 (*endnum) ++;
609 laarr[curr].extLocation.logicalBlockNum = newblocknum;
610 if ((type >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED)
611 laarr[curr].extLocation.partitionReferenceNum =
612 UDF_I_LOCATION(inode).partitionReferenceNum;
613 laarr[curr].extLength = (EXTENT_RECORDED_ALLOCATED << 30) |
614 inode->i_sb->s_blocksize;
615 curr ++;
617 if (blen != offset + 1)
619 if ((type >> 30) == EXTENT_NOT_RECORDED_ALLOCATED)
620 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
621 laarr[curr].extLength = type |
622 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
623 curr ++;
624 (*endnum) ++;
629 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
630 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
632 int start, length = 0, currlength = 0, i;
634 if (*endnum >= (c+1) && !lastblock)
635 return;
637 if ((laarr[c+1].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED)
639 start = c+1;
640 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
641 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
643 else
644 start = c;
646 for (i=start+1; i<=*endnum; i++)
648 if (i == *endnum)
650 if (lastblock)
651 length += UDF_DEFAULT_PREALLOC_BLOCKS;
653 else if ((laarr[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED)
654 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
655 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
656 else
657 break;
660 if (length)
662 int next = laarr[start].extLocation.logicalBlockNum +
663 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
664 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
665 int numalloc = udf_prealloc_blocks(inode,
666 laarr[start].extLocation.partitionReferenceNum,
667 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
668 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
670 if (numalloc)
672 if (start == (c+1))
673 laarr[start].extLength +=
674 (numalloc << inode->i_sb->s_blocksize_bits);
675 else
677 memmove(&laarr[c+2], &laarr[c+1],
678 sizeof(long_ad) * (*endnum - (c+1)));
679 (*endnum) ++;
680 laarr[c+1].extLocation.logicalBlockNum = next;
681 laarr[c+1].extLocation.partitionReferenceNum =
682 laarr[c].extLocation.partitionReferenceNum;
683 laarr[c+1].extLength = (EXTENT_NOT_RECORDED_ALLOCATED << 30) |
684 (numalloc << inode->i_sb->s_blocksize_bits);
685 start = c+1;
688 for (i=start+1; numalloc && i<*endnum; i++)
690 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
691 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
693 if (elen > numalloc)
695 laarr[c+1].extLength -=
696 (numalloc << inode->i_sb->s_blocksize_bits);
697 numalloc = 0;
699 else
701 numalloc -= elen;
702 if (*endnum > (i+1))
703 memmove(&laarr[i], &laarr[i+1],
704 sizeof(long_ad) * (*endnum - (i+1)));
705 i --;
706 (*endnum) --;
713 static void udf_merge_extents(struct inode *inode,
714 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
716 int i;
718 for (i=0; i<(*endnum-1); i++)
720 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
722 if (((laarr[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) ||
723 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
724 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
725 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
727 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
728 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
729 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
731 laarr[i+1].extLength = (laarr[i+1].extLength -
732 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
733 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
734 laarr[i].extLength = (UDF_EXTENT_LENGTH_MASK + 1) -
735 inode->i_sb->s_blocksize;
736 laarr[i+1].extLocation.logicalBlockNum =
737 laarr[i].extLocation.logicalBlockNum +
738 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
739 inode->i_sb->s_blocksize_bits);
741 else
743 laarr[i].extLength = laarr[i+1].extLength +
744 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
745 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
746 if (*endnum > (i+2))
747 memmove(&laarr[i+1], &laarr[i+2],
748 sizeof(long_ad) * (*endnum - (i+2)));
749 i --;
750 (*endnum) --;
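/*
 * Editor's note, worked example of the merge above: two adjacent
 * not-recorded/not-allocated extents of 3 and 2 blocks collapse into a
 * single 5 block extent of the same type; adjacent recorded extents are
 * only merged when they are also physically contiguous on disc.
 */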
757 static void udf_update_extents(struct inode *inode,
758 long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
759 lb_addr pbloc, Uint32 pextoffset, struct buffer_head **pbh)
761 int start = 0, i;
762 lb_addr tmploc;
763 Uint32 tmplen;
765 if (startnum > endnum)
767 for (i=0; i<(startnum-endnum); i++)
769 udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
770 laarr[i].extLength, *pbh);
773 else if (startnum < endnum)
775 for (i=0; i<(endnum-startnum); i++)
777 udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
778 laarr[i].extLength, *pbh);
779 udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
780 &laarr[i].extLength, pbh, 1);
781 start ++;
785 for (i=start; i<endnum; i++)
787 udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
788 udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
789 laarr[i].extLength, pbh, 1);
793 struct buffer_head * udf_bread(struct inode * inode, int block,
794 int create, int * err)
796 struct buffer_head * bh = NULL;
797 int prev_blocks;
799 prev_blocks = inode->i_blocks;
801 bh = udf_getblk(inode, block, create, err);
802 if (!bh)
803 return NULL;
805 #if 0
806 if (create &&
807 S_ISDIR(inode->i_mode) &&
808 inode->i_blocks > prev_blocks)
810 int i;
811 struct buffer_head *tmp_bh = NULL;
813 for (i=1;
814 i < UDF_DEFAULT_PREALLOC_DIR_BLOCKS;
815 i++)
817 tmp_bh = udf_getblk(inode, block+i, create, err);
818 if (!tmp_bh)
820 udf_release_data(bh);
821 return 0;
823 udf_release_data(tmp_bh);
826 #endif
828 if (buffer_uptodate(bh))
829 return bh;
830 ll_rw_block(READ, 1, &bh);
831 wait_on_buffer(bh);
832 if (buffer_uptodate(bh))
833 return bh;
834 brelse(bh);
835 *err = -EIO;
836 return NULL;
840 * udf_read_inode
842 * PURPOSE
843 * Read an inode.
845 * DESCRIPTION
846 * This routine is called by iget() [which is called by udf_iget()]
847 * (clean_inode() will have been called first)
848 * when an inode is first read into memory.
850 * HISTORY
851 * July 1, 1997 - Andrew E. Mileski
852 * Written, tested, and released.
854 * 12/19/98 dgb Updated to fix size problems.
857 void
858 udf_read_inode(struct inode *inode)
860 memset(&UDF_I_LOCATION(inode), 0xFF, sizeof(lb_addr));
863 void
864 __udf_read_inode(struct inode *inode)
866 struct buffer_head *bh = NULL;
867 struct FileEntry *fe;
868 Uint16 ident;
871 * Set defaults, but the inode is still incomplete!
872 * Note: get_new_inode() sets the following on a new inode:
873 * i_sb = sb
874 * i_dev = sb->s_dev;
875 * i_no = ino
876 * i_flags = sb->s_flags
877 * i_state = 0
878 * clean_inode(): zero fills and sets
879 * i_count = 1
880 * i_nlink = 1
881 * i_op = NULL;
884 inode->i_blksize = PAGE_SIZE;
886 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
888 if (!bh)
890 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
891 inode->i_ino);
892 make_bad_inode(inode);
893 return;
896 if (ident != TID_FILE_ENTRY && ident != TID_EXTENDED_FILE_ENTRY)
898 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
899 inode->i_ino, ident);
900 udf_release_data(bh);
901 make_bad_inode(inode);
902 return;
905 fe = (struct FileEntry *)bh->b_data;
907 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
909 struct buffer_head *ibh = NULL, *nbh = NULL;
910 struct IndirectEntry *ie;
912 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
913 if (ident == TID_INDIRECT_ENTRY)
915 if (ibh)
917 lb_addr loc;
918 ie = (struct IndirectEntry *)ibh->b_data;
920 loc = lelb_to_cpu(ie->indirectICB.extLocation);
922 if (ie->indirectICB.extLength &&
923 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
925 if (ident == TID_FILE_ENTRY ||
926 ident == TID_EXTENDED_FILE_ENTRY)
928 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(lb_addr));
929 udf_release_data(bh);
930 udf_release_data(ibh);
931 udf_release_data(nbh);
932 __udf_read_inode(inode);
933 return;
935 else
937 udf_release_data(nbh);
938 udf_release_data(ibh);
941 else
942 udf_release_data(ibh);
945 else
946 udf_release_data(ibh);
948 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
950 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
951 le16_to_cpu(fe->icbTag.strategyType));
952 udf_release_data(bh);
953 make_bad_inode(inode);
954 return;
956 udf_fill_inode(inode, bh);
957 udf_release_data(bh);
960 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
962 struct FileEntry *fe;
963 struct ExtendedFileEntry *efe;
964 time_t convtime;
965 long convtime_usec;
966 int offset, alen;
968 inode->i_version = ++event;
969 UDF_I_NEW_INODE(inode) = 0;
971 fe = (struct FileEntry *)bh->b_data;
972 efe = (struct ExtendedFileEntry *)bh->b_data;
974 if (fe->descTag.tagIdent == TID_EXTENDED_FILE_ENTRY)
975 UDF_I_EXTENDED_FE(inode) = 1;
976 else /* fe->descTag.tagIdent == TID_FILE_ENTRY */
977 UDF_I_EXTENDED_FE(inode) = 0;
979 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
980 UDF_I_STRAT4096(inode) = 0;
981 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
982 UDF_I_STRAT4096(inode) = 1;
984 inode->i_uid = le32_to_cpu(fe->uid);
985 if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
987 inode->i_gid = le32_to_cpu(fe->gid);
988 if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
990 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
991 if (!inode->i_nlink)
992 inode->i_nlink = 1;
994 inode->i_size = le64_to_cpu(fe->informationLength);
996 inode->i_mode = udf_convert_permissions(fe);
997 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
999 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1000 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1002 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICB_FLAG_ALLOC_MASK;
1004 if (UDF_I_EXTENDED_FE(inode) == 0)
1006 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1007 (inode->i_sb->s_blocksize_bits - 9);
1009 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1010 lets_to_cpu(fe->modificationTime)) )
1012 inode->i_mtime = convtime;
1013 UDF_I_UMTIME(inode) = convtime_usec;
1014 inode->i_ctime = convtime;
1015 UDF_I_UCTIME(inode) = convtime_usec;
1017 else
1019 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1020 UDF_I_UMTIME(inode) = 0;
1021 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1022 UDF_I_UCTIME(inode) = 0;
1025 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1026 lets_to_cpu(fe->accessTime)) )
1028 inode->i_atime = convtime;
1029 UDF_I_UATIME(inode) = convtime_usec;
1031 else
1033 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1034 UDF_I_UATIME(inode) = 0;
1037 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1038 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1039 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1040 offset = sizeof(struct FileEntry) + UDF_I_LENEATTR(inode);
1041 alen = offset + UDF_I_LENALLOC(inode);
1043 else
1045 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1046 (inode->i_sb->s_blocksize_bits - 9);
1048 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1049 lets_to_cpu(efe->modificationTime)) )
1051 inode->i_mtime = convtime;
1052 UDF_I_UMTIME(inode) = convtime_usec;
1054 else
1056 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1057 UDF_I_UMTIME(inode) = 0;
1060 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1061 lets_to_cpu(efe->accessTime)) )
1063 inode->i_atime = convtime;
1064 UDF_I_UATIME(inode) = convtime_usec;
1066 else
1068 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1069 UDF_I_UATIME(inode) = 0;
1072 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1073 lets_to_cpu(efe->createTime)) )
1075 inode->i_ctime = convtime;
1076 UDF_I_UCTIME(inode) = convtime_usec;
1078 else
1080 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1081 UDF_I_UCTIME(inode) = 0;
1084 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1085 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1086 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1087 offset = sizeof(struct ExtendedFileEntry) + UDF_I_LENEATTR(inode);
1088 alen = offset + UDF_I_LENALLOC(inode);
1091 switch (fe->icbTag.fileType)
1093 case FILE_TYPE_DIRECTORY:
1095 inode->i_op = &udf_dir_inode_operations;
1096 inode->i_fop = &udf_dir_operations;
1097 inode->i_mode |= S_IFDIR;
1098 inode->i_nlink ++;
1099 break;
1101 case FILE_TYPE_REGULAR:
1102 case FILE_TYPE_NONE:
1104 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB)
1105 inode->i_data.a_ops = &udf_adinicb_aops;
1106 else
1107 inode->i_data.a_ops = &udf_aops;
1108 inode->i_op = &udf_file_inode_operations;
1109 inode->i_fop = &udf_file_operations;
1110 inode->i_mode |= S_IFREG;
1111 break;
1113 case FILE_TYPE_BLOCK:
1115 inode->i_mode |= S_IFBLK;
1116 break;
1118 case FILE_TYPE_CHAR:
1120 inode->i_mode |= S_IFCHR;
1121 break;
1123 case FILE_TYPE_FIFO:
1125 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1126 break;
1128 case FILE_TYPE_SYMLINK:
1130 inode->i_data.a_ops = &udf_symlink_aops;
1131 inode->i_op = &page_symlink_inode_operations;
1132 inode->i_mode = S_IFLNK|S_IRWXUGO;
1133 break;
1135 default:
1137 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1138 inode->i_ino, fe->icbTag.fileType);
1139 make_bad_inode(inode);
1140 return;
1143 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1145 struct buffer_head *tbh = NULL;
1146 struct DeviceSpecificationExtendedAttr *dsea =
1147 (struct DeviceSpecificationExtendedAttr *)
1148 udf_get_extendedattr(inode, 12, 1, &tbh);
1150 if (dsea)
1152 init_special_inode(inode, inode->i_mode,
1153 ((le32_to_cpu(dsea->majorDeviceIdent)) << 8) |
1154 (le32_to_cpu(dsea->minorDeviceIdent) & 0xFF));
1155 /* Developer ID ??? */
1156 udf_release_data(tbh);
1158 else
1160 make_bad_inode(inode);
1165 static mode_t
1166 udf_convert_permissions(struct FileEntry *fe)
1168 mode_t mode;
1169 Uint32 permissions;
1170 Uint32 flags;
1172 permissions = le32_to_cpu(fe->permissions);
1173 flags = le16_to_cpu(fe->icbTag.flags);
1175 mode = (( permissions ) & S_IRWXO) |
1176 (( permissions >> 2 ) & S_IRWXG) |
1177 (( permissions >> 4 ) & S_IRWXU) |
1178 (( flags & ICB_FLAG_SETUID) ? S_ISUID : 0) |
1179 (( flags & ICB_FLAG_SETGID) ? S_ISGID : 0) |
1180 (( flags & ICB_FLAG_STICKY) ? S_ISVTX : 0);
1182 return mode;
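/*
 * Editor's note, worked example of the mapping above: UDF keeps five
 * permission bits per class (execute, write, read, change attribute,
 * delete) with "other" in bits 0-4, "group" in bits 5-9 and "owner" in
 * bits 10-14, so shifting by 0, 2 and 4 lines the rwx bits up with
 * S_IRWXO, S_IRWXG and S_IRWXU.  A permissions value of 0x1CA4
 * (owner rwx, group r-x, other r--) therefore becomes mode 0754.
 */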
1186 * udf_write_inode
1188 * PURPOSE
1189 * Write out the specified inode.
1191 * DESCRIPTION
1192 * This routine is called whenever an inode is synced.
1193 * Currently this routine is just a placeholder.
1195 * HISTORY
1196 * July 1, 1997 - Andrew E. Mileski
1197 * Written, tested, and released.
1200 void udf_write_inode(struct inode * inode, int sync)
1202 lock_kernel();
1203 udf_update_inode(inode, sync);
1204 unlock_kernel();
1207 int udf_sync_inode(struct inode * inode)
1209 return udf_update_inode(inode, 1);
1212 static int
1213 udf_update_inode(struct inode *inode, int do_sync)
1215 struct buffer_head *bh = NULL;
1216 struct FileEntry *fe;
1217 struct ExtendedFileEntry *efe;
1218 Uint32 udfperms;
1219 Uint16 icbflags;
1220 Uint16 crclen;
1221 int i;
1222 timestamp cpu_time;
1223 int err = 0;
1225 bh = udf_tread(inode->i_sb,
1226 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
1227 inode->i_sb->s_blocksize);
1228 if (!bh)
1230 udf_debug("bread failure\n");
1231 return -EIO;
1233 fe = (struct FileEntry *)bh->b_data;
1234 efe = (struct ExtendedFileEntry *)bh->b_data;
1235 if (UDF_I_NEW_INODE(inode) == 1)
1237 if (UDF_I_EXTENDED_FE(inode) == 0)
1238 memset(bh->b_data, 0x0, sizeof(struct FileEntry));
1239 else
1240 memset(bh->b_data, 0x00, sizeof(struct ExtendedFileEntry));
1241 memset(bh->b_data + udf_file_entry_alloc_offset(inode) +
1242 UDF_I_LENALLOC(inode), 0x0, inode->i_sb->s_blocksize -
1243 udf_file_entry_alloc_offset(inode) - UDF_I_LENALLOC(inode));
1244 UDF_I_NEW_INODE(inode) = 0;
1247 if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
1248 fe->uid = cpu_to_le32(inode->i_uid);
1250 if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
1251 fe->gid = cpu_to_le32(inode->i_gid);
1253 udfperms = ((inode->i_mode & S_IRWXO) ) |
1254 ((inode->i_mode & S_IRWXG) << 2) |
1255 ((inode->i_mode & S_IRWXU) << 4);
1257 udfperms |= (le32_to_cpu(fe->permissions) &
1258 (PERM_O_DELETE | PERM_O_CHATTR |
1259 PERM_G_DELETE | PERM_G_CHATTR |
1260 PERM_U_DELETE | PERM_U_CHATTR));
1261 fe->permissions = cpu_to_le32(udfperms);
1263 if (S_ISDIR(inode->i_mode))
1264 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1265 else
1266 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1268 fe->informationLength = cpu_to_le64(inode->i_size);
1270 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1272 EntityID *eid;
1273 struct buffer_head *tbh = NULL;
1274 struct DeviceSpecificationExtendedAttr *dsea =
1275 (struct DeviceSpecificationExtendedAttr *)
1276 udf_get_extendedattr(inode, 12, 1, &tbh);
1278 if (!dsea)
1280 dsea = (struct DeviceSpecificationExtendedAttr *)
1281 udf_add_extendedattr(inode,
1282 sizeof(struct DeviceSpecificationExtendedAttr) +
1283 sizeof(EntityID), 12, 0x3, &tbh);
1284 dsea->attrType = 12;
1285 dsea->attrSubtype = 1;
1286 dsea->attrLength = sizeof(struct DeviceSpecificationExtendedAttr) +
1287 sizeof(EntityID);
1288 dsea->impUseLength = sizeof(EntityID);
1290 eid = (EntityID *)dsea->impUse;
1291 memset(eid, 0, sizeof(EntityID));
1292 strcpy(eid->ident, UDF_ID_DEVELOPER);
1293 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1294 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1295 dsea->majorDeviceIdent = kdev_t_to_nr(inode->i_rdev) >> 8;
1296 dsea->minorDeviceIdent = kdev_t_to_nr(inode->i_rdev) & 0xFF;
1297 mark_buffer_dirty(tbh);
1298 udf_release_data(tbh);
1301 if (UDF_I_EXTENDED_FE(inode) == 0)
1303 fe->logicalBlocksRecorded = cpu_to_le64(
1304 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1305 (inode->i_sb->s_blocksize_bits - 9));
1307 if (udf_time_to_stamp(&cpu_time, inode->i_atime, UDF_I_UATIME(inode)))
1308 fe->accessTime = cpu_to_lets(cpu_time);
1309 if (udf_time_to_stamp(&cpu_time, inode->i_mtime, UDF_I_UMTIME(inode)))
1310 fe->modificationTime = cpu_to_lets(cpu_time);
1311 memset(&(fe->impIdent), 0, sizeof(EntityID));
1312 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1313 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1314 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1315 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1316 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1317 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1318 fe->descTag.tagIdent = cpu_to_le16(TID_FILE_ENTRY);
1319 crclen = sizeof(struct FileEntry);
1321 else
1323 efe->logicalBlocksRecorded = cpu_to_le64(
1324 (inode->i_blocks + (2 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1325 (inode->i_sb->s_blocksize_bits - 9));
1327 if (udf_time_to_stamp(&cpu_time, inode->i_atime, UDF_I_UATIME(inode)))
1328 efe->accessTime = cpu_to_lets(cpu_time);
1329 if (udf_time_to_stamp(&cpu_time, inode->i_mtime, UDF_I_UMTIME(inode)))
1330 efe->modificationTime = cpu_to_lets(cpu_time);
1331 if (udf_time_to_stamp(&cpu_time, inode->i_ctime, UDF_I_UCTIME(inode)))
1332 efe->createTime = cpu_to_lets(cpu_time);
1333 memset(&(efe->impIdent), 0, sizeof(EntityID));
1334 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1335 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1336 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1337 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1338 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1339 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1340 efe->descTag.tagIdent = cpu_to_le16(TID_EXTENDED_FILE_ENTRY);
1341 crclen = sizeof(struct ExtendedFileEntry);
1343 if (UDF_I_STRAT4096(inode))
1345 fe->icbTag.strategyType = cpu_to_le16(4096);
1346 fe->icbTag.strategyParameter = cpu_to_le16(1);
1347 fe->icbTag.numEntries = cpu_to_le16(2);
1349 else
1351 fe->icbTag.strategyType = cpu_to_le16(4);
1352 fe->icbTag.numEntries = cpu_to_le16(1);
1355 if (S_ISDIR(inode->i_mode))
1356 fe->icbTag.fileType = FILE_TYPE_DIRECTORY;
1357 else if (S_ISREG(inode->i_mode))
1358 fe->icbTag.fileType = FILE_TYPE_REGULAR;
1359 else if (S_ISLNK(inode->i_mode))
1360 fe->icbTag.fileType = FILE_TYPE_SYMLINK;
1361 else if (S_ISBLK(inode->i_mode))
1362 fe->icbTag.fileType = FILE_TYPE_BLOCK;
1363 else if (S_ISCHR(inode->i_mode))
1364 fe->icbTag.fileType = FILE_TYPE_CHAR;
1365 else if (S_ISFIFO(inode->i_mode))
1366 fe->icbTag.fileType = FILE_TYPE_FIFO;
1368 icbflags = UDF_I_ALLOCTYPE(inode) |
1369 ((inode->i_mode & S_ISUID) ? ICB_FLAG_SETUID : 0) |
1370 ((inode->i_mode & S_ISGID) ? ICB_FLAG_SETGID : 0) |
1371 ((inode->i_mode & S_ISVTX) ? ICB_FLAG_STICKY : 0) |
1372 (le16_to_cpu(fe->icbTag.flags) &
1373 ~(ICB_FLAG_ALLOC_MASK | ICB_FLAG_SETUID |
1374 ICB_FLAG_SETGID | ICB_FLAG_STICKY));
1376 fe->icbTag.flags = cpu_to_le16(icbflags);
1377 fe->descTag.descVersion = cpu_to_le16(2);
1378 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1379 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1380 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1381 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1382 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1384 fe->descTag.tagChecksum = 0;
1385 for (i=0; i<16; i++)
1386 if (i != 4)
1387 fe->descTag.tagChecksum += ((Uint8 *)&(fe->descTag))[i];
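/*
 * Editor's note: descCRC above covers everything after the 16 byte tag
 * (crclen = descriptor size + extended attributes + allocation descriptors
 * - sizeof(tag)), while tagChecksum is simply the 8-bit sum of the tag's
 * 16 bytes with byte 4 - the checksum field itself - skipped, which is what
 * the loop above computes.
 */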
1389 /* write the data blocks */
1390 mark_buffer_dirty(bh);
1391 if (do_sync)
1393 ll_rw_block(WRITE, 1, &bh);
1394 wait_on_buffer(bh);
1395 if (buffer_req(bh) && !buffer_uptodate(bh))
1397 printk("IO error syncing udf inode [%s:%08lx]\n",
1398 bdevname(inode->i_dev), inode->i_ino);
1399 err = -EIO;
1402 udf_release_data(bh);
1403 return err;
1407 * udf_iget
1409 * PURPOSE
1410 * Get an inode.
1412 * DESCRIPTION
1413 * This routine replaces iget() and read_inode().
1415 * HISTORY
1416 * October 3, 1997 - Andrew E. Mileski
1417 * Written, tested, and released.
1419 * 12/19/98 dgb Added semaphore and changed to be a wrapper of iget
1421 struct inode *
1422 udf_iget(struct super_block *sb, lb_addr ino)
1424 struct inode *inode;
1425 unsigned long block;
1427 block = udf_get_lb_pblock(sb, ino, 0);
1429 /* Get the inode */
1431 inode = iget(sb, block);
1432 /* calls udf_read_inode() ! */
1434 if (!inode)
1436 printk(KERN_ERR "udf: iget() failed\n");
1437 return NULL;
1439 else if (is_bad_inode(inode))
1441 iput(inode);
1442 return NULL;
1444 else if (UDF_I_LOCATION(inode).logicalBlockNum == 0xFFFFFFFF &&
1445 UDF_I_LOCATION(inode).partitionReferenceNum == 0xFFFF)
1447 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(lb_addr));
1448 __udf_read_inode(inode);
1451 if ( ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum) )
1453 udf_debug("block=%d, partition=%d out of range\n",
1454 ino.logicalBlockNum, ino.partitionReferenceNum);
1455 return NULL;
1458 return inode;
1461 int udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1462 lb_addr eloc, Uint32 elen, struct buffer_head **bh, int inc)
1464 int adsize;
1465 short_ad *sad = NULL;
1466 long_ad *lad = NULL;
1467 struct AllocExtDesc *aed;
1468 int ret;
1470 if (!(*bh))
1472 if (!(*bh = udf_tread(inode->i_sb,
1473 udf_get_lb_pblock(inode->i_sb, *bloc, 0),
1474 inode->i_sb->s_blocksize)))
1476 udf_debug("reading block %d failed!\n",
1477 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1478 return -1;
1482 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
1483 adsize = sizeof(short_ad);
1484 else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
1485 adsize = sizeof(long_ad);
1486 else
1487 return -1;
1489 if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
1491 char *sptr, *dptr;
1492 struct buffer_head *nbh;
1493 int err, loffset;
1494 lb_addr obloc = *bloc;
1496 if (!(bloc->logicalBlockNum = udf_new_block(inode,
1497 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1499 return -1;
1501 if (!(nbh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1502 *bloc, 0), inode->i_sb->s_blocksize)))
1504 return -1;
1506 aed = (struct AllocExtDesc *)(nbh->b_data);
1507 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
1508 if (*extoffset + adsize > inode->i_sb->s_blocksize)
1510 loffset = *extoffset;
1511 aed->lengthAllocDescs = cpu_to_le32(adsize);
1512 sptr = (*bh)->b_data + *extoffset - adsize;
1513 dptr = nbh->b_data + sizeof(struct AllocExtDesc);
1514 memcpy(dptr, sptr, adsize);
1515 *extoffset = sizeof(struct AllocExtDesc) + adsize;
1517 else
1519 loffset = *extoffset + adsize;
1520 aed->lengthAllocDescs = cpu_to_le32(0);
1521 sptr = (*bh)->b_data + *extoffset;
1522 *extoffset = sizeof(struct AllocExtDesc);
1524 if (memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
1526 aed = (struct AllocExtDesc *)(*bh)->b_data;
1527 aed->lengthAllocDescs =
1528 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1530 else
1532 UDF_I_LENALLOC(inode) += adsize;
1533 mark_inode_dirty(inode);
1536 udf_new_tag(nbh->b_data, TID_ALLOC_EXTENT_DESC, 2, 1,
1537 bloc->logicalBlockNum, sizeof(tag));
1538 switch (UDF_I_ALLOCTYPE(inode))
1540 case ICB_FLAG_AD_SHORT:
1542 sad = (short_ad *)sptr;
1543 sad->extLength = cpu_to_le32(
1544 EXTENT_NEXT_EXTENT_ALLOCDECS << 30 |
1545 inode->i_sb->s_blocksize);
1546 sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
1547 break;
1549 case ICB_FLAG_AD_LONG:
1551 lad = (long_ad *)sptr;
1552 lad->extLength = cpu_to_le32(
1553 EXTENT_NEXT_EXTENT_ALLOCDECS << 30 |
1554 inode->i_sb->s_blocksize);
1555 lad->extLocation = cpu_to_lelb(*bloc);
1556 break;
1559 udf_update_tag((*bh)->b_data, loffset);
1560 mark_buffer_dirty(*bh);
1561 udf_release_data(*bh);
1562 *bh = nbh;
1565 ret = udf_write_aext(inode, *bloc, extoffset, eloc, elen, bh, inc);
1567 if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1569 UDF_I_LENALLOC(inode) += adsize;
1570 mark_inode_dirty(inode);
1572 else
1574 aed = (struct AllocExtDesc *)(*bh)->b_data;
1575 aed->lengthAllocDescs =
1576 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1577 udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
1578 mark_buffer_dirty(*bh);
1581 return ret;
1584 int udf_write_aext(struct inode *inode, lb_addr bloc, int *extoffset,
1585 lb_addr eloc, Uint32 elen, struct buffer_head **bh, int inc)
1587 int adsize;
1588 short_ad *sad = NULL;
1589 long_ad *lad = NULL;
1591 if (!(*bh))
1593 if (!(*bh = udf_tread(inode->i_sb,
1594 udf_get_lb_pblock(inode->i_sb, bloc, 0),
1595 inode->i_sb->s_blocksize)))
1597 udf_debug("reading block %d failed!\n",
1598 udf_get_lb_pblock(inode->i_sb, bloc, 0));
1599 return -1;
1603 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
1604 adsize = sizeof(short_ad);
1605 else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
1606 adsize = sizeof(long_ad);
1607 else
1608 return -1;
1610 switch (UDF_I_ALLOCTYPE(inode))
1612 case ICB_FLAG_AD_SHORT:
1614 sad = (short_ad *)((*bh)->b_data + *extoffset);
1615 sad->extLength = cpu_to_le32(elen);
1616 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1617 break;
1619 case ICB_FLAG_AD_LONG:
1621 lad = (long_ad *)((*bh)->b_data + *extoffset);
1622 lad->extLength = cpu_to_le32(elen);
1623 lad->extLocation = cpu_to_lelb(eloc);
1624 break;
1628 if (memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
1630 struct AllocExtDesc *aed = (struct AllocExtDesc *)(*bh)->b_data;
1631 udf_update_tag((*bh)->b_data,
1632 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct AllocExtDesc));
1634 else
1635 mark_inode_dirty(inode);
1637 mark_buffer_dirty(*bh);
1639 if (inc)
1640 *extoffset += adsize;
1641 return (elen >> 30);
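/*
 * Editor's note: as elsewhere in this file, the value returned above is the
 * two bit extent type taken from the top of extLength.  For example an elen
 * of (EXTENT_RECORDED_ALLOCATED << 30) | 2048 describes one recorded
 * 2048 byte extent and makes udf_write_aext() return
 * EXTENT_RECORDED_ALLOCATED.
 */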
1644 int udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1645 lb_addr *eloc, Uint32 *elen, struct buffer_head **bh, int inc)
1647 int pos, alen;
1648 Uint8 etype;
1650 if (!(*bh))
1652 if (!(*bh = udf_tread(inode->i_sb,
1653 udf_get_lb_pblock(inode->i_sb, *bloc, 0),
1654 inode->i_sb->s_blocksize)))
1656 udf_debug("reading block %d failed!\n",
1657 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1658 return -1;
1662 if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1664 pos = udf_file_entry_alloc_offset(inode);
1665 alen = UDF_I_LENALLOC(inode) + pos;
1667 else
1669 struct AllocExtDesc *aed = (struct AllocExtDesc *)(*bh)->b_data;
1671 pos = sizeof(struct AllocExtDesc);
1672 alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
1675 if (!(*extoffset))
1676 *extoffset = pos;
1678 switch (UDF_I_ALLOCTYPE(inode))
1680 case ICB_FLAG_AD_SHORT:
1682 short_ad *sad;
1684 if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
1685 return -1;
1687 if ((etype = le32_to_cpu(sad->extLength) >> 30) == EXTENT_NEXT_EXTENT_ALLOCDECS)
1689 bloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1690 *extoffset = 0;
1691 udf_release_data(*bh);
1692 *bh = NULL;
1693 return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
1695 else
1697 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1698 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1699 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1701 break;
1703 case ICB_FLAG_AD_LONG:
1705 long_ad *lad;
1707 if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
1708 return -1;
1710 if ((etype = le32_to_cpu(lad->extLength) >> 30) == EXTENT_NEXT_EXTENT_ALLOCDECS)
1712 *bloc = lelb_to_cpu(lad->extLocation);
1713 *extoffset = 0;
1714 udf_release_data(*bh);
1715 *bh = NULL;
1716 return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
1718 else
1720 *eloc = lelb_to_cpu(lad->extLocation);
1721 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1723 break;
1725 case ICB_FLAG_AD_IN_ICB:
1727 if (UDF_I_LENALLOC(inode) == 0)
1728 return -1;
1729 etype = EXTENT_RECORDED_ALLOCATED;
1730 *eloc = UDF_I_LOCATION(inode);
1731 *elen = UDF_I_LENALLOC(inode);
1732 break;
1734 default:
1736 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1737 return -1;
1740 if (*elen)
1741 return etype;
1743 udf_debug("Empty Extent, inode=%ld, alloctype=%d, eloc=%d, elen=%d, etype=%d, extoffset=%d\n",
1744 inode->i_ino, UDF_I_ALLOCTYPE(inode), eloc->logicalBlockNum, *elen, etype, *extoffset);
1745 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
1746 *extoffset -= sizeof(short_ad);
1747 else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
1748 *extoffset -= sizeof(long_ad);
1749 return -1;
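/*
 * Editor's note: in both the short_ad and long_ad branches above, a
 * descriptor whose extLength carries EXTENT_NEXT_EXTENT_ALLOCDECS in its
 * top two bits is not a data extent at all - it points at an Allocation
 * Extent Descriptor block, and udf_next_aext() restarts the walk there.
 * E.g. a long_ad with extLength (EXTENT_NEXT_EXTENT_ALLOCDECS << 30) |
 * blocksize and extLocation naming logical block 1000 means "the remaining
 * allocation descriptors continue in block 1000".
 */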
1752 int udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1753 lb_addr *eloc, Uint32 *elen, struct buffer_head **bh, int inc)
1755 int pos, alen;
1756 Uint8 etype;
1758 if (!(*bh))
1760 if (!(*bh = udf_tread(inode->i_sb,
1761 udf_get_lb_pblock(inode->i_sb, *bloc, 0),
1762 inode->i_sb->s_blocksize)))
1764 udf_debug("reading block %d failed!\n",
1765 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1766 return -1;
1770 if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1772 if (!(UDF_I_EXTENDED_FE(inode)))
1773 pos = sizeof(struct FileEntry) + UDF_I_LENEATTR(inode);
1774 else
1775 pos = sizeof(struct ExtendedFileEntry) + UDF_I_LENEATTR(inode);
1776 alen = UDF_I_LENALLOC(inode) + pos;
1778 else
1780 struct AllocExtDesc *aed = (struct AllocExtDesc *)(*bh)->b_data;
1782 pos = sizeof(struct AllocExtDesc);
1783 alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
1786 if (!(*extoffset))
1787 *extoffset = pos;
1789 switch (UDF_I_ALLOCTYPE(inode))
1791 case ICB_FLAG_AD_SHORT:
1793 short_ad *sad;
1795 if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
1796 return -1;
1798 etype = le32_to_cpu(sad->extLength) >> 30;
1799 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1800 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1801 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1802 break;
1804 case ICB_FLAG_AD_LONG:
1806 long_ad *lad;
1808 if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
1809 return -1;
1811 etype = le32_to_cpu(lad->extLength) >> 30;
1812 *eloc = lelb_to_cpu(lad->extLocation);
1813 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1814 break;
1816 default:
1818 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1819 return -1;
1822 if (*elen)
1823 return etype;
1825 udf_debug("Empty Extent!\n");
1826 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
1827 *extoffset -= sizeof(short_ad);
1828 else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
1829 *extoffset -= sizeof(long_ad);
1830 return -1;
1833 int udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
1834 lb_addr neloc, Uint32 nelen, struct buffer_head *bh)
1836 lb_addr oeloc;
1837 Uint32 oelen;
1838 int type;
1840 if (!bh)
1842 if (!(bh = udf_tread(inode->i_sb,
1843 udf_get_lb_pblock(inode->i_sb, bloc, 0),
1844 inode->i_sb->s_blocksize)))
1846 udf_debug("reading block %d failed!\n",
1847 udf_get_lb_pblock(inode->i_sb, bloc, 0));
1848 return -1;
1851 else
1852 atomic_inc(&bh->b_count);
1854 while ((type = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1856 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, &bh, 1);
1858 neloc = oeloc;
1859 nelen = (type << 30) | oelen;
1861 udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1862 udf_release_data(bh);
1863 return (nelen >> 30);
1866 int udf_delete_aext(struct inode *inode, lb_addr nbloc, int nextoffset,
1867 lb_addr eloc, Uint32 elen, struct buffer_head *nbh)
1869 struct buffer_head *obh;
1870 lb_addr obloc;
1871 int oextoffset, adsize;
1872 int type;
1873 struct AllocExtDesc *aed;
1875 if (!(nbh))
1877 if (!(nbh = udf_tread(inode->i_sb,
1878 udf_get_lb_pblock(inode->i_sb, nbloc, 0),
1879 inode->i_sb->s_blocksize)))
1881 udf_debug("reading block %d failed!\n",
1882 udf_get_lb_pblock(inode->i_sb, nbloc, 0));
1883 return -1;
1886 else
1887 atomic_inc(&nbh->b_count);
1888 atomic_inc(&nbh->b_count);
1890 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
1891 adsize = sizeof(short_ad);
1892 else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
1893 adsize = sizeof(long_ad);
1894 else
1895 adsize = 0;
1897 obh = nbh;
1898 obloc = nbloc;
1899 oextoffset = nextoffset;
1901 if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
1902 return -1;
1904 while ((type = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
1906 udf_write_aext(inode, obloc, &oextoffset, eloc, (type << 30) | elen, &obh, 1);
1907 if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
1909 obloc = nbloc;
1910 udf_release_data(obh);
1911 atomic_inc(&nbh->b_count);
1912 obh = nbh;
1913 oextoffset = nextoffset - adsize;
1916 memset(&eloc, 0x00, sizeof(lb_addr));
1917 elen = 0;
1919 if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
1921 udf_free_blocks(inode, nbloc, 0, 1);
1922 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, &obh, 1);
1923 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, &obh, 1);
1924 if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
1926 UDF_I_LENALLOC(inode) -= (adsize * 2);
1927 mark_inode_dirty(inode);
1929 else
1931 aed = (struct AllocExtDesc *)(obh)->b_data;
1932 aed->lengthAllocDescs =
1933 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
1934 udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
1935 mark_buffer_dirty(obh);
1938 else
1940 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, &obh, 1);
1941 if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
1943 UDF_I_LENALLOC(inode) -= adsize;
1944 mark_inode_dirty(inode);
1946 else
1948 aed = (struct AllocExtDesc *)(obh)->b_data;
1949 aed->lengthAllocDescs =
1950 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
1951 udf_update_tag((obh)->b_data, oextoffset - adsize);
1952 mark_buffer_dirty(obh);
1956 udf_release_data(nbh);
1957 udf_release_data(obh);
1958 return (elen >> 30);
1961 int inode_bmap(struct inode *inode, int block, lb_addr *bloc, Uint32 *extoffset,
1962 lb_addr *eloc, Uint32 *elen, Uint32 *offset, struct buffer_head **bh)
1964 int etype, lbcount = 0;
1966 if (block < 0)
1968 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1969 return -1;
1971 if (!inode)
1973 printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
1974 return -1;
1977 *extoffset = 0;
1978 *elen = 0;
1979 *bloc = UDF_I_LOCATION(inode);
1983 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
1985 *offset = block - lbcount;
1986 return -1;
1988 lbcount += ((*elen + inode->i_sb->s_blocksize - 1) >>
1989 inode->i_sb->s_blocksize_bits);
1990 } while (lbcount <= block);
1992 *offset = block + ((*elen + inode->i_sb->s_blocksize - 1) >>
1993 inode->i_sb->s_blocksize_bits) - lbcount;
1995 return etype;
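/*
 * Editor's note, worked example of inode_bmap() above: with extents of 3
 * and 5 blocks, asking for file block 4 stops the walk on the second extent
 * with lbcount = 8, so *offset = 4 + 5 - 8 = 1, i.e. the second block of
 * that extent.
 */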
1998 long udf_locked_block_map(struct inode *inode, long block)
2000 lb_addr eloc, bloc;
2001 Uint32 offset, extoffset, elen;
2002 struct buffer_head *bh = NULL;
2003 int ret;
2005 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == EXTENT_RECORDED_ALLOCATED)
2006 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
2007 else
2008 ret = 0;
2010 if (bh)
2011 udf_release_data(bh);
2013 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2014 return udf_fixed_to_variable(ret);
2015 else
2016 return ret;
2019 long udf_block_map(struct inode *inode, long block)
2021 int ret;
2023 lock_kernel();
2024 ret = udf_locked_block_map(inode, block);
2025 unlock_kernel();
2026 return ret;