Merge with Linux 2.4.0-test5-pre6.
[linux-2.6/linux-mips.git] / fs / udf / inode.c
blobbbc4e30a5074f52015d914a4339a161544affc7a
1 /*
2 * inode.c
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
7 * CONTACTS
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hootie.lvld.hp.com
12 * COPYRIGHT
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2000 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
22 * HISTORY
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
36 #include "udfdecl.h"
37 #include <linux/locks.h>
38 #include <linux/mm.h>
39 #include <linux/smp_lock.h>
41 #include "udf_i.h"
42 #include "udf_sb.h"
44 #define EXTENT_MERGE_SIZE 5
46 static mode_t udf_convert_permissions(struct FileEntry *);
47 static int udf_update_inode(struct inode *, int);
48 static void udf_fill_inode(struct inode *, struct buffer_head *);
49 static struct buffer_head *inode_getblk(struct inode *, long, int *, long *, int *);
50 static void udf_split_extents(struct inode *, int *, int, int,
51 long_ad [EXTENT_MERGE_SIZE], int *);
52 static void udf_prealloc_extents(struct inode *, int, int,
53 long_ad [EXTENT_MERGE_SIZE], int *);
54 static void udf_merge_extents(struct inode *,
55 long_ad [EXTENT_MERGE_SIZE], int *);
56 static void udf_update_extents(struct inode *,
57 long_ad [EXTENT_MERGE_SIZE], int, int,
58 lb_addr, Uint32, struct buffer_head **);
59 static int udf_get_block(struct inode *, long, struct buffer_head *, int);
62 * udf_put_inode
64 * PURPOSE
66 * DESCRIPTION
67 * This routine is called whenever the kernel no longer needs the inode.
69 * HISTORY
70 * July 1, 1997 - Andrew E. Mileski
71 * Written, tested, and released.
73 * Called at each iput()
75 void udf_put_inode(struct inode * inode)
77 if (!(inode->i_sb->s_flags & MS_RDONLY))
79 lock_kernel();
80 udf_discard_prealloc(inode);
81 /* write the root inode on put, if dirty */
82 if (!inode->i_sb->s_root && inode->i_state & I_DIRTY)
83 udf_update_inode(inode, IS_SYNC(inode));
84 unlock_kernel();
89 * udf_delete_inode
91 * PURPOSE
92 * Clean-up before the specified inode is destroyed.
94 * DESCRIPTION
95 * This routine is called when the kernel destroys an inode structure
96 * ie. when iput() finds i_count == 0.
98 * HISTORY
99 * July 1, 1997 - Andrew E. Mileski
100 * Written, tested, and released.
102 * Called at the last iput() if i_nlink is zero.
104 void udf_delete_inode(struct inode * inode)
106 lock_kernel();
108 if (is_bad_inode(inode))
110 clear_inode(inode);
111 goto out;
114 inode->i_size = 0;
115 udf_truncate(inode);
116 udf_update_inode(inode, IS_SYNC(inode));
117 udf_free_inode(inode);
118 out:
119 unlock_kernel();
122 void udf_discard_prealloc(struct inode * inode)
124 if (inode->i_size && UDF_I_ALLOCTYPE(inode) != ICB_FLAG_AD_IN_ICB)
125 udf_trunc(inode);
/* address_space writepage: write a page using udf_get_block() for mapping. */
static int udf_writepage(struct file *file, struct page *page)
{
	return block_write_full_page(page, udf_get_block);
}
/* address_space readpage: read a page using udf_get_block() for mapping. */
static int udf_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, udf_get_block);
}
/* address_space prepare_write: map/allocate blocks for a pending write. */
static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, udf_get_block);
}
/* address_space bmap: logical-to-physical block lookup (no allocation). */
static int udf_bmap(struct address_space *mapping, long block)
{
	return generic_block_bmap(mapping,block,udf_get_block);
}
/*
 * Page-cache operations for regular UDF files: all block mapping goes
 * through udf_get_block(); write commit uses the generic helper.
 */
struct address_space_operations udf_aops = {
	readpage:		udf_readpage,
	writepage:		udf_writepage,
	sync_page:		block_sync_page,
	prepare_write:		udf_prepare_write,
	commit_write:		generic_commit_write,
	bmap:			udf_bmap,
};
/*
 * udf_expand_file_adinicb
 *
 * Convert a file whose data is embedded in the ICB (allocation type
 * ICB_FLAG_AD_IN_ICB) into one with a real extent list: copy the
 * embedded bytes into page 0 of the page cache, wipe them from the
 * file entry, switch to short/long allocation descriptors, and write
 * the page back.  'newsize' and 'err' are unused in the visible body
 * beyond the signature -- presumably kept for interface symmetry with
 * udf_expand_dir_adinicb(); verify against callers.
 */
void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
{
	struct buffer_head *bh = NULL;
	struct page *page;
	unsigned long kaddr = 0;
	int block;

	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;

	if (!UDF_I_LENALLOC(inode))
	{
		/* nothing embedded: just flip the allocation type */
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_SHORT;
		else
			UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
		mark_inode_dirty(inode);
		return;
	}

	block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
	bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
	if (!bh)
		return;
	page = grab_cache_page(inode->i_mapping, 0);
	if (!PageLocked(page))
		PAGE_BUG(page);
	if (!Page_Uptodate(page))
	{
		/* populate page 0 with the embedded data, zero-fill the tail */
		kaddr = kmap(page);
		memset((char *)kaddr + UDF_I_LENALLOC(inode), 0x00,
			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
		memcpy((char *)kaddr, bh->b_data + udf_file_entry_alloc_offset(inode),
			UDF_I_LENALLOC(inode));
		SetPageUptodate(page);
		kunmap(page);
	}
	/* erase the embedded data area of the file entry */
	memset(bh->b_data + udf_file_entry_alloc_offset(inode),
		0, UDF_I_LENALLOC(inode));
	UDF_I_LENALLOC(inode) = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_SHORT;
	else
		UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
	inode->i_blocks = inode->i_sb->s_blocksize / 512;
	mark_buffer_dirty(bh, 1);
	udf_release_data(bh);

	inode->i_data.a_ops->writepage(NULL, page);
	UnlockPage(page);
	page_cache_release(page);

	mark_inode_dirty(inode);
	inode->i_version ++;
}
/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory stored inside the ICB into one backed by a newly
 * allocated data block: allocate the block, walk every file identifier
 * embedded in the file entry and rewrite it into the new block, clear
 * the in-ICB area, and record the block as the directory's first
 * extent.  On success returns the new block's buffer_head (caller
 * releases) and stores the logical block in *block; returns NULL on
 * failure (*err may be set by udf_new_block()).
 */
struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
{
	int newblock;
	struct buffer_head *sbh = NULL, *dbh = NULL;
	lb_addr bloc, eloc;
	Uint32 elen, extoffset;

	struct udf_fileident_bh sfibh, dfibh;
	/* directory positions are kept in 4-byte units, hence the >> 2 */
	loff_t f_pos = udf_ext0_offset(inode) >> 2;
	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
	struct FileIdentDesc cfi, *sfi, *dfi;

	if (!inode->i_size)
	{
		/* empty directory: only the allocation type changes */
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_SHORT;
		else
			UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
		mark_inode_dirty(inode);
		return NULL;
	}

	/* alloc block, and copy data to it */
	*block = udf_new_block(inode,
		UDF_I_LOCATION(inode).partitionReferenceNum,
		UDF_I_LOCATION(inode).logicalBlockNum, err);

	if (!(*block))
		return NULL;
	newblock = udf_get_pblock(inode->i_sb, *block,
		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
	if (!newblock)
		return NULL;
	/* NOTE(review): i_ino is used directly as the physical block number
	   of the file entry -- confirm i_ino == pblock holds on this fs */
	sbh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
	if (!sbh)
		return NULL;
	dbh = udf_tread(inode->i_sb, newblock, inode->i_sb->s_blocksize);
	if (!dbh)
		return NULL;	/* NOTE(review): sbh appears leaked on this path */

	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
	sfibh.sbh = sfibh.ebh = sbh;
	dfibh.soffset = dfibh.eoffset = 0;
	dfibh.sbh = dfibh.ebh = dbh;
	/* copy each file identifier from the ICB into the new block */
	while ( (f_pos < size) )
	{
		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
		if (!sfi)
		{
			udf_release_data(sbh);
			udf_release_data(dbh);
			return NULL;
		}
		sfi->descTag.tagLocation = *block;
		dfibh.soffset = dfibh.eoffset;
		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
		dfi = (struct FileIdentDesc *)(dbh->b_data + dfibh.soffset);
		if (udf_write_fi(sfi, dfi, &dfibh, sfi->impUse,
			sfi->fileIdent + sfi->lengthOfImpUse))
		{
			udf_release_data(sbh);
			udf_release_data(dbh);
			return NULL;
		}
	}
	mark_buffer_dirty(dbh, 1);

	/* clear the old in-ICB directory data and switch allocation type */
	memset(sbh->b_data + udf_file_entry_alloc_offset(inode),
		0, UDF_I_LENALLOC(inode));

	UDF_I_LENALLOC(inode) = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_SHORT;
	else
		UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
	/* record the new block as the directory's single extent */
	bloc = UDF_I_LOCATION(inode);
	eloc.logicalBlockNum = *block;
	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
	elen = inode->i_size;
	extoffset = udf_file_entry_alloc_offset(inode);
	udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
	/* UniqueID stuff */

	inode->i_blocks = inode->i_sb->s_blocksize / 512;
	mark_buffer_dirty(sbh, 1);
	udf_release_data(sbh);
	mark_inode_dirty(inode);
	inode->i_version ++;
	return dbh;
}
/*
 * udf_get_block
 *
 * get_block callback for the generic block I/O helpers.  Maps logical
 * 'block' of the inode into bh_result; with 'create' set it allocates
 * a new block through inode_getblk() under the big kernel lock and
 * flags the result BH_New.  Returns 0 on success, negative errno on
 * failure.  The non-create path is lock-free and never fails: an
 * unmapped block simply leaves bh_result unmapped.
 */
static int udf_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create)
{
	int err, new;
	struct buffer_head *bh;
	unsigned long phys;

	if (!create)
	{
		phys = udf_block_map(inode, block);
		if (phys)
		{
			bh_result->b_dev = inode->i_dev;
			bh_result->b_blocknr = phys;
			bh_result->b_state |= (1UL << BH_Mapped);
		}
		return 0;
	}

	err = -EIO;
	new = 0;
	bh = NULL;

	lock_kernel();

	if (block < 0)
		goto abort_negative;

	/* sequential write: advance the next-allocation hint */
	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
	{
		UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
		UDF_I_NEXT_ALLOC_GOAL(inode) ++;
	}

	err = 0;

	/* inode_getblk() returns the mapping via phys/new, never a bh */
	bh = inode_getblk(inode, block, &err, &phys, &new);
	if (bh)
		BUG();
	if (err)
		goto abort;
	if (!phys)
		BUG();

	bh_result->b_dev = inode->i_dev;
	bh_result->b_blocknr = phys;
	bh_result->b_state |= (1UL << BH_Mapped);
	if (new)
		bh_result->b_state |= (1UL << BH_New);
abort:
	unlock_kernel();
	return err;

abort_negative:
	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
	goto abort;
}
361 struct buffer_head * udf_getblk(struct inode * inode, long block,
362 int create, int * err)
364 struct buffer_head dummy;
365 int error;
367 dummy.b_state = 0;
368 dummy.b_blocknr = -1000;
369 error = udf_get_block(inode, block, &dummy, create);
370 *err = error;
371 if (!error & buffer_mapped(&dummy))
373 struct buffer_head *bh;
374 bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
375 if (buffer_new(&dummy))
377 if (!buffer_uptodate(bh))
378 wait_on_buffer(bh);
379 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
380 mark_buffer_uptodate(bh, 1);
381 mark_buffer_dirty(bh, 1);
383 return bh;
385 return NULL;
388 static struct buffer_head * inode_getblk(struct inode * inode, long block,
389 int *err, long *phys, int *new)
391 struct buffer_head *pbh = NULL, *cbh = NULL, *result = NULL;
392 long_ad laarr[EXTENT_MERGE_SIZE];
393 Uint32 pextoffset = 0, cextoffset = 0, nextoffset = 0;
394 int count = 0, startnum = 0, endnum = 0;
395 Uint32 elen = 0;
396 lb_addr eloc, pbloc = UDF_I_LOCATION(inode), cbloc = UDF_I_LOCATION(inode);
397 int c = 1;
398 int lbcount = 0, b_off = 0, offset = 0;
399 Uint32 newblocknum, newblock;
400 int etype;
401 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
402 char lastblock = 0;
404 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
405 b_off = block << inode->i_sb->s_blocksize_bits;
406 pbloc = cbloc = UDF_I_LOCATION(inode);
408 /* find the extent which contains the block we are looking for.
409 alternate between laarr[0] and laarr[1] for locations of the
410 current extent, and the previous extent */
413 if (pbh != cbh)
415 udf_release_data(pbh);
416 pbh = cbh;
417 atomic_inc(&cbh->b_count);
418 pbloc = cbloc;
421 lbcount += elen;
423 pextoffset = cextoffset;
424 cextoffset = nextoffset;
426 if ((etype = udf_next_aext(inode, &cbloc, &nextoffset, &eloc, &elen, &cbh, 1)) == -1)
427 break;
429 c = !c;
431 laarr[c].extLength = (etype << 30) | elen;
432 laarr[c].extLocation = eloc;
434 if (etype != EXTENT_NOT_RECORDED_NOT_ALLOCATED)
435 pgoal = eloc.logicalBlockNum +
436 ((elen + inode->i_sb->s_blocksize - 1) >>
437 inode->i_sb->s_blocksize);
439 count ++;
440 } while (lbcount + elen <= b_off);
442 b_off -= lbcount;
443 offset = b_off >> inode->i_sb->s_blocksize_bits;
445 /* if the extent is allocated and recorded, return the block
446 if the extent is not a multiple of the blocksize, round up */
448 if (etype == EXTENT_RECORDED_ALLOCATED)
450 if (elen & (inode->i_sb->s_blocksize - 1))
452 elen = (EXTENT_RECORDED_ALLOCATED << 30) |
453 ((elen + inode->i_sb->s_blocksize - 1) &
454 ~(inode->i_sb->s_blocksize - 1));
455 etype = udf_write_aext(inode, cbloc, &cextoffset, eloc, elen, &cbh, 1);
457 udf_release_data(pbh);
458 udf_release_data(cbh);
459 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
460 *phys = newblock;
461 return NULL;
464 if (etype == -1)
466 endnum = startnum = ((count > 1) ? 1 : count);
467 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
469 laarr[c].extLength =
470 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
471 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
472 inode->i_sb->s_blocksize - 1) &
473 ~(inode->i_sb->s_blocksize - 1));
475 c = !c;
476 laarr[c].extLength = (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30) |
477 ((offset + 1) << inode->i_sb->s_blocksize_bits);
478 memset(&laarr[c].extLocation, 0x00, sizeof(lb_addr));
479 count ++;
480 endnum ++;
481 lastblock = 1;
483 else
484 endnum = startnum = ((count > 2) ? 2 : count);
486 /* if the current extent is in position 0, swap it with the previous */
487 if (!c && count != 1)
489 laarr[2] = laarr[0];
490 laarr[0] = laarr[1];
491 laarr[1] = laarr[2];
492 c = 1;
495 /* if the current block is located in a extent, read the next extent */
496 if (etype != -1)
498 if ((etype = udf_next_aext(inode, &cbloc, &nextoffset, &eloc, &elen, &cbh, 0)) != -1)
500 laarr[c+1].extLength = (etype << 30) | elen;
501 laarr[c+1].extLocation = eloc;
502 count ++;
503 startnum ++;
504 endnum ++;
506 else
507 lastblock = 1;
509 udf_release_data(cbh);
511 /* if the current extent is not recorded but allocated, get the
512 block in the extent corresponding to the requested block */
513 if ((laarr[c].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED)
514 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
515 else /* otherwise, allocate a new block */
517 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
518 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
520 if (!goal)
522 if (!(goal = pgoal))
523 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
526 if (!(newblocknum = udf_new_block(inode,
527 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
529 udf_release_data(pbh);
530 *err = -ENOSPC;
531 return NULL;
535 /* if the extent the requsted block is located in contains multiple blocks,
536 split the extent into at most three extents. blocks prior to requested
537 block, requested block, and blocks after requested block */
538 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
540 #ifdef UDF_PREALLOCATE
541 /* preallocate blocks */
542 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
543 #endif
545 /* merge any continuous blocks in laarr */
546 udf_merge_extents(inode, laarr, &endnum);
548 /* write back the new extents, inserting new extents if the new number
549 of extents is greater than the old number, and deleting extents if
550 the new number of extents is less than the old number */
551 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
553 udf_release_data(pbh);
555 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
556 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
558 return NULL;
560 *phys = newblock;
561 *err = 0;
562 *new = 1;
563 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
564 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
565 inode->i_ctime = CURRENT_TIME;
566 UDF_I_UCTIME(inode) = CURRENT_UTIME;
567 inode->i_blocks += inode->i_sb->s_blocksize / 512;
568 if (IS_SYNC(inode))
569 udf_sync_inode(inode);
570 else
571 mark_inode_dirty(inode);
572 return result;
/*
 * udf_split_extents
 *
 * Split the (not-recorded) extent laarr[*c] that contains the requested
 * block into up to three pieces: blocks before the requested block,
 * the requested block itself (now recorded+allocated at newblocknum),
 * and blocks after it.  *c and *endnum are advanced to track the new
 * layout.  No-op for extents that are already recorded.
 */
static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
	long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	if ((laarr[*c].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED ||
		(laarr[*c].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED)
	{
		int curr = *c;
		/* extent length in blocks, rounded up */
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
		int type = laarr[curr].extLength & ~UDF_EXTENT_LENGTH_MASK;

		/* make room in laarr[]: 1-block extent needs no shifting;
		   a split at either end needs one slot; a middle split two */
		if (blen == 1)
			;
		else if (!offset || blen == offset + 1)
		{
			laarr[curr+2] = laarr[curr+1];
			laarr[curr+1] = laarr[curr];
		}
		else
		{
			laarr[curr+3] = laarr[curr+1];
			laarr[curr+2] = laarr[curr+1] = laarr[curr];
		}

		if (offset)
		{
			/* leading piece: blocks before the requested block */
			laarr[curr].extLength = type |
				(offset << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*c) ++;
			(*endnum) ++;
		}

		/* middle piece: the requested block, recorded and allocated */
		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if ((type >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED)
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I_LOCATION(inode).partitionReferenceNum;
		laarr[curr].extLength = (EXTENT_RECORDED_ALLOCATED << 30) |
			inode->i_sb->s_blocksize;
		curr ++;

		if (blen != offset + 1)
		{
			/* trailing piece: blocks after the requested block */
			if ((type >> 30) == EXTENT_NOT_RECORDED_ALLOCATED)
				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
			laarr[curr].extLength = type |
				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*endnum) ++;
		}
	}
}
/*
 * udf_prealloc_extents
 *
 * Preallocate up to UDF_DEFAULT_PREALLOC_BLOCKS blocks following the
 * just-allocated block laarr[c], turning following not-recorded extents
 * (and, at end of file, the hole past EOF) into a not-recorded-but-
 * allocated extent.  laarr[] and *endnum are adjusted in place.
 */
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
	long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c+1) && !lastblock)
		return;

	/* start from an existing trailing allocated extent if there is one */
	if ((laarr[c+1].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED)
	{
		start = c+1;
		length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
	}
	else
		start = c;

	/* count how many blocks of following not-recorded extents (plus the
	   post-EOF hole) could be covered by preallocation */
	for (i=start+1; i<=*endnum; i++)
	{
		if (i == *endnum)
		{
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		}
		else if ((laarr[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED)
			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		else
			break;
	}

	if (length)
	{
		/* first block after the current allocated run */
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode,
			laarr[start].extLocation.partitionReferenceNum,
			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);

		if (numalloc)
		{
			/* extend the existing allocated extent, or insert a new
			   not-recorded-allocated extent right after laarr[c] */
			if (start == (c+1))
				laarr[start].extLength +=
					(numalloc << inode->i_sb->s_blocksize_bits);
			else
			{
				memmove(&laarr[c+2], &laarr[c+1],
					sizeof(long_ad) * (*endnum - (c+1)));
				(*endnum) ++;
				laarr[c+1].extLocation.logicalBlockNum = next;
				laarr[c+1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.partitionReferenceNum;
				laarr[c+1].extLength = (EXTENT_NOT_RECORDED_ALLOCATED << 30) |
					(numalloc << inode->i_sb->s_blocksize_bits);
				start = c+1;
			}

			/* shrink or remove the not-recorded extents now covered
			   by the preallocation */
			for (i=start+1; numalloc && i<*endnum; i++)
			{
				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;

				if (elen > numalloc)
				{
					laarr[c+1].extLength -=
						(numalloc << inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				}
				else
				{
					numalloc -= elen;
					if (*endnum > (i+1))
						memmove(&laarr[i], &laarr[i+1],
							sizeof(long_ad) * (*endnum - (i+1)));
					i --;
					(*endnum) --;
				}
			}
		}
	}
}
/*
 * udf_merge_extents
 *
 * Coalesce adjacent laarr[] entries that have the same extent type and
 * are physically contiguous (or are both not-recorded/not-allocated).
 * When the combined length would overflow the extent length field the
 * first entry is topped up to the largest block-aligned length and the
 * rest stays in the second; otherwise the pair is merged and the array
 * compacted, decrementing *endnum.
 */
static void udf_merge_extents(struct inode *inode,
	long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int i;

	for (i=0; i<(*endnum-1); i++)
	{
		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
		{
			if (((laarr[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) ||
				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
			{
				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
				{
					/* combined length overflows the length field:
					   max out extent i, keep the remainder in i+1 */
					laarr[i+1].extLength = (laarr[i+1].extLength -
						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
					laarr[i].extLength = (UDF_EXTENT_LENGTH_MASK + 1) -
						inode->i_sb->s_blocksize;
					laarr[i+1].extLocation.logicalBlockNum =
						laarr[i].extLocation.logicalBlockNum +
						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
							inode->i_sb->s_blocksize_bits);
				}
				else
				{
					/* merge i+1 into i and close the gap */
					laarr[i].extLength = laarr[i+1].extLength +
						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
					if (*endnum > (i+2))
						memmove(&laarr[i+1], &laarr[i+2],
							sizeof(long_ad) * (*endnum - (i+2)));
					i --;
					(*endnum) --;
				}
			}
		}
	}
}
/*
 * udf_update_extents
 *
 * Write the merged extent list laarr[0..endnum) back to the inode's
 * allocation descriptors, starting at (pbloc, pextoffset).  If the new
 * list is shorter than the original 'startnum' extents, the surplus
 * descriptors are deleted; if longer, new descriptors are inserted
 * first.  The remainder are overwritten in place.
 */
static void udf_update_extents(struct inode *inode,
	long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
	lb_addr pbloc, Uint32 pextoffset, struct buffer_head **pbh)
{
	int start = 0, i;
	lb_addr tmploc;
	Uint32 tmplen;

	if (startnum > endnum)
	{
		/* fewer extents than before: delete the surplus descriptors */
		for (i=0; i<(startnum-endnum); i++)
		{
			udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
				laarr[i].extLength, *pbh);
		}
	}
	else if (startnum < endnum)
	{
		/* more extents than before: insert the extras up front */
		for (i=0; i<(endnum-startnum); i++)
		{
			udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
				laarr[i].extLength, *pbh);
			udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
				&laarr[i].extLength, pbh, 1);
			start ++;
		}
	}

	/* overwrite the remaining descriptors in place */
	for (i=start; i<endnum; i++)
	{
		udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
		udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
			laarr[i].extLength, pbh, 1);
	}
}
/*
 * udf_bread
 *
 * Return an up-to-date buffer_head for logical 'block' of the inode,
 * allocating the block when 'create' is set, reading it from disc when
 * it is not already cached uptodate.  Returns NULL and sets *err on
 * failure.
 */
struct buffer_head * udf_bread(struct inode * inode, int block,
	int create, int * err)
{
	struct buffer_head * bh = NULL;
	int prev_blocks;

	prev_blocks = inode->i_blocks;

	bh = udf_getblk(inode, block, create, err);
	if (!bh)
		return NULL;

#if 0
	/* disabled experiment: preallocate directory blocks after a grow */
	if (create &&
		S_ISDIR(inode->i_mode) &&
		inode->i_blocks > prev_blocks)
	{
		int i;
		struct buffer_head *tmp_bh = NULL;

		for (i=1;
			i < UDF_DEFAULT_PREALLOC_DIR_BLOCKS;
			i++)
		{
			tmp_bh = udf_getblk(inode, block+i, create, err);
			if (!tmp_bh)
			{
				udf_release_data(bh);
				return 0;
			}
			udf_release_data(tmp_bh);
		}
	}
#endif

	if (buffer_uptodate(bh))
		return bh;
	/* not cached: issue a synchronous read */
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	*err = -EIO;
	return NULL;
}
839 * udf_read_inode
841 * PURPOSE
842 * Read an inode.
844 * DESCRIPTION
845 * This routine is called by iget() [which is called by udf_iget()]
846 * (clean_inode() will have been called first)
847 * when an inode is first read into memory.
849 * HISTORY
850 * July 1, 1997 - Andrew E. Mileski
851 * Written, tested, and released.
853 * 12/19/98 dgb Updated to fix size problems.
/*
 * udf_read_inode
 *
 * super_operations read_inode hook.  Only poisons the on-disc location
 * (all 0xFF) so it is recognizably invalid; presumably the real fill
 * happens later through __udf_read_inode() once the caller has set the
 * correct location -- verify against udf_iget().
 */
void
udf_read_inode(struct inode *inode)
{
	memset(&UDF_I_LOCATION(inode), 0xFF, sizeof(lb_addr));
}
/*
 * __udf_read_inode
 *
 * Read the inode's file entry (or extended file entry) from disc and
 * fill the in-core inode.  For strategy-4096 ICBs, follow the indirect
 * entry to the latest file entry, update the inode's location, and
 * recurse.  Marks the inode bad on any read/ident failure or on an
 * unsupported strategy type.
 */
void
__udf_read_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct FileEntry *fe;
	Uint16 ident;

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_dev = sb->s_dev;
	 *      i_no = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
	inode->i_blksize = PAGE_SIZE;

	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);

	if (!bh)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
			inode->i_ino);
		make_bad_inode(inode);
		return;
	}

	if (ident != TID_FILE_ENTRY && ident != TID_EXTENDED_FILE_ENTRY)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
			inode->i_ino, ident);
		udf_release_data(bh);
		make_bad_inode(inode);
		return;
	}

	fe = (struct FileEntry *)bh->b_data;

	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
	{
		/* strategy 4096: chase the indirect ICB to the newest entry */
		struct buffer_head *ibh = NULL, *nbh = NULL;
		struct IndirectEntry *ie;

		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
		if (ident == TID_INDIRECT_ENTRY)
		{
			if (ibh)
			{
				lb_addr loc;
				ie = (struct IndirectEntry *)ibh->b_data;

				loc = lelb_to_cpu(ie->indirectICB.extLocation);

				if (ie->indirectICB.extLength &&
					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
				{
					if (ident == TID_FILE_ENTRY ||
						ident == TID_EXTENDED_FILE_ENTRY)
					{
						/* retarget the inode and re-read it */
						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(lb_addr));
						udf_release_data(bh);
						udf_release_data(ibh);
						udf_release_data(nbh);
						__udf_read_inode(inode);
						return;
					}
					else
					{
						udf_release_data(nbh);
						udf_release_data(ibh);
					}
				}
				else
					udf_release_data(ibh);
			}
		}
		else
			udf_release_data(ibh);
	}
	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
	{
		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
			le16_to_cpu(fe->icbTag.strategyType));
		udf_release_data(bh);
		make_bad_inode(inode);
		return;
	}
	udf_fill_inode(inode, bh);
	udf_release_data(bh);
}
959 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
961 struct FileEntry *fe;
962 struct ExtendedFileEntry *efe;
963 time_t convtime;
964 long convtime_usec;
965 int offset, alen;
967 inode->i_version = ++event;
968 UDF_I_NEW_INODE(inode) = 0;
970 fe = (struct FileEntry *)bh->b_data;
971 efe = (struct ExtendedFileEntry *)bh->b_data;
973 if (fe->descTag.tagIdent == TID_EXTENDED_FILE_ENTRY)
974 UDF_I_EXTENDED_FE(inode) = 1;
975 else /* fe->descTag.tagIdent == TID_FILE_ENTRY */
976 UDF_I_EXTENDED_FE(inode) = 0;
978 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
979 UDF_I_STRAT4096(inode) = 0;
980 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
981 UDF_I_STRAT4096(inode) = 1;
983 inode->i_uid = le32_to_cpu(fe->uid);
984 if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
986 inode->i_gid = le32_to_cpu(fe->gid);
987 if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
989 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
990 if (!inode->i_nlink)
991 inode->i_nlink = 1;
993 inode->i_size = le64_to_cpu(fe->informationLength);
995 inode->i_mode = udf_convert_permissions(fe);
996 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
998 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
999 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1001 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICB_FLAG_ALLOC_MASK;
1003 if (UDF_I_EXTENDED_FE(inode) == 0)
1005 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1006 (inode->i_sb->s_blocksize_bits - 9);
1008 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1009 lets_to_cpu(fe->modificationTime)) )
1011 inode->i_mtime = convtime;
1012 UDF_I_UMTIME(inode) = convtime_usec;
1013 inode->i_ctime = convtime;
1014 UDF_I_UCTIME(inode) = convtime_usec;
1016 else
1018 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1019 UDF_I_UMTIME(inode) = 0;
1020 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1021 UDF_I_UCTIME(inode) = 0;
1024 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1025 lets_to_cpu(fe->accessTime)) )
1027 inode->i_atime = convtime;
1028 UDF_I_UATIME(inode) = convtime_usec;
1030 else
1032 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1033 UDF_I_UATIME(inode) = convtime_usec;
1036 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1037 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1038 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1039 offset = sizeof(struct FileEntry) + UDF_I_LENEATTR(inode);
1040 alen = offset + UDF_I_LENALLOC(inode);
1042 else
1044 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1045 (inode->i_sb->s_blocksize_bits - 9);
1047 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1048 lets_to_cpu(efe->modificationTime)) )
1050 inode->i_mtime = convtime;
1051 UDF_I_UMTIME(inode) = convtime_usec;
1053 else
1055 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1056 UDF_I_UMTIME(inode) = 0;
1059 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1060 lets_to_cpu(efe->accessTime)) )
1062 inode->i_atime = convtime;
1063 UDF_I_UATIME(inode) = convtime_usec;
1065 else
1067 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1068 UDF_I_UATIME(inode) = 0;
1071 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1072 lets_to_cpu(efe->createTime)) )
1074 inode->i_ctime = convtime;
1075 UDF_I_UCTIME(inode) = convtime_usec;
1077 else
1079 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1080 UDF_I_UCTIME(inode) = 0;
1083 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1084 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1085 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1086 offset = sizeof(struct ExtendedFileEntry) + UDF_I_LENEATTR(inode);
1087 alen = offset + UDF_I_LENALLOC(inode);
1090 switch (fe->icbTag.fileType)
1092 case FILE_TYPE_DIRECTORY:
1094 inode->i_op = &udf_dir_inode_operations;
1095 inode->i_fop = &udf_dir_operations;
1096 inode->i_mode |= S_IFDIR;
1097 inode->i_nlink ++;
1098 break;
1100 case FILE_TYPE_REGULAR:
1101 case FILE_TYPE_NONE:
1103 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB)
1104 inode->i_data.a_ops = &udf_adinicb_aops;
1105 else
1106 inode->i_data.a_ops = &udf_aops;
1107 inode->i_op = &udf_file_inode_operations;
1108 inode->i_fop = &udf_file_operations;
1109 inode->i_mode |= S_IFREG;
1110 break;
1112 case FILE_TYPE_BLOCK:
1114 inode->i_mode |= S_IFBLK;
1115 break;
1117 case FILE_TYPE_CHAR:
1119 inode->i_mode |= S_IFCHR;
1120 break;
1122 case FILE_TYPE_FIFO:
1124 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1125 break;
1127 case FILE_TYPE_SYMLINK:
1129 inode->i_data.a_ops = &udf_symlink_aops;
1130 inode->i_op = &page_symlink_inode_operations;
1131 inode->i_mode = S_IFLNK|S_IRWXUGO;
1132 break;
1134 default:
1136 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1137 inode->i_ino, fe->icbTag.fileType);
1138 make_bad_inode(inode);
1139 return;
1142 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1144 struct buffer_head *tbh = NULL;
1145 struct DeviceSpecificationExtendedAttr *dsea =
1146 (struct DeviceSpecificationExtendedAttr *)
1147 udf_get_extendedattr(inode, 12, 1, &tbh);
1149 if (dsea)
1151 init_special_inode(inode, inode->i_mode,
1152 ((le32_to_cpu(dsea->majorDeviceIdent)) << 8) |
1153 (le32_to_cpu(dsea->minorDeviceIdent) & 0xFF));
1154 /* Developer ID ??? */
1155 udf_release_data(tbh);
1157 else
1159 make_bad_inode(inode);
1164 static mode_t
1165 udf_convert_permissions(struct FileEntry *fe)
1167 mode_t mode;
1168 Uint32 permissions;
1169 Uint32 flags;
1171 permissions = le32_to_cpu(fe->permissions);
1172 flags = le16_to_cpu(fe->icbTag.flags);
1174 mode = (( permissions ) & S_IRWXO) |
1175 (( permissions >> 2 ) & S_IRWXG) |
1176 (( permissions >> 4 ) & S_IRWXU) |
1177 (( flags & ICB_FLAG_SETUID) ? S_ISUID : 0) |
1178 (( flags & ICB_FLAG_SETGID) ? S_ISGID : 0) |
1179 (( flags & ICB_FLAG_STICKY) ? S_ISVTX : 0);
1181 return mode;
1185 * udf_write_inode
1187 * PURPOSE
1188 * Write out the specified inode.
1190 * DESCRIPTION
1191 * This routine is called whenever an inode is synced.
1192 * Currently this routine is just a placeholder.
1194 * HISTORY
1195 * July 1, 1997 - Andrew E. Mileski
1196 * Written, tested, and released.
/*
 * udf_write_inode
 *
 * super_operations write_inode hook: flush the in-core inode to its
 * on-disc file entry under the big kernel lock.
 */
void udf_write_inode(struct inode * inode, int sync)
{
	lock_kernel();
	udf_update_inode(inode, sync);
	unlock_kernel();
}
/* Synchronously write the inode's file entry to disc. */
int udf_sync_inode(struct inode * inode)
{
	return udf_update_inode(inode, 1);
}
/*
 * Serialize the in-core inode back into its on-disc FileEntry or
 * ExtendedFileEntry, recompute the descriptor CRC/checksum and mark the
 * buffer dirty, optionally writing it out synchronously.
 * Returns 0 on success, -EIO on I/O failure.
 */
static int
udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct FileEntry *fe;
	struct ExtendedFileEntry *efe;
	Uint32 udfperms;
	Uint16 icbflags;
	Uint16 crclen;
	int i;
	timestamp cpu_time;
	int err = 0;

	/* Read the block holding this inode's (Extended)FileEntry. */
	bh = udf_tread(inode->i_sb,
		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
		inode->i_sb->s_blocksize);
	if (!bh)
	{
		udf_debug("bread failure\n");
		return -EIO;
	}
	/* fe and efe alias the same buffer; which layout is valid depends
	   on UDF_I_EXTENDED_FE(). */
	fe = (struct FileEntry *)bh->b_data;
	efe = (struct ExtendedFileEntry *)bh->b_data;

	if (UDF_I_NEW_INODE(inode) == 1)
	{
		/* Freshly created inode: zero the descriptor header and the
		   unused tail of the block past the allocation descriptors. */
		if (UDF_I_EXTENDED_FE(inode) == 0)
			memset(bh->b_data, 0x0, sizeof(struct FileEntry));
		else
			memset(bh->b_data, 0x00, sizeof(struct ExtendedFileEntry));
		memset(bh->b_data + udf_file_entry_alloc_offset(inode) +
			UDF_I_LENALLOC(inode), 0x0, inode->i_sb->s_blocksize -
			udf_file_entry_alloc_offset(inode) - UDF_I_LENALLOC(inode));
		UDF_I_NEW_INODE(inode) = 0;
	}

	/* Only record a uid/gid that differs from the mount defaults. */
	if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
		fe->uid = cpu_to_le32(inode->i_uid);

	if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
		fe->gid = cpu_to_le32(inode->i_gid);

	/* Unix rwx bits -> UDF per-class permission groups (other/group/
	   owner, spaced 5 bits apart on disc)... */
	udfperms =	((inode->i_mode & S_IRWXO)     ) |
			((inode->i_mode & S_IRWXG) << 2) |
			((inode->i_mode & S_IRWXU) << 4);

	/* ...preserving the UDF-only delete/chattr bits already on disc. */
	udfperms |=	(le32_to_cpu(fe->permissions) &
			(PERM_O_DELETE | PERM_O_CHATTR |
			 PERM_G_DELETE | PERM_G_CHATTR |
			 PERM_U_DELETE | PERM_U_CHATTR));
	fe->permissions = cpu_to_le32(udfperms);

	/* UDF directories do not count the "." self-reference. */
	if (S_ISDIR(inode->i_mode))
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
	{
		/* Device nodes keep major/minor in a Device Specification
		   extended attribute (type 12, subtype 1); create it if it
		   does not exist yet. */
		EntityID *eid;
		struct buffer_head *tbh = NULL;
		struct DeviceSpecificationExtendedAttr *dsea =
			(struct DeviceSpecificationExtendedAttr *)
				udf_get_extendedattr(inode, 12, 1, &tbh);

		if (!dsea)
		{
			dsea = (struct DeviceSpecificationExtendedAttr *)
				udf_add_extendedattr(inode,
					sizeof(struct DeviceSpecificationExtendedAttr) +
					sizeof(EntityID), 12, 0x3, &tbh);
			dsea->attrType = 12;
			dsea->attrSubtype = 1;
			dsea->attrLength = sizeof(struct DeviceSpecificationExtendedAttr) +
				sizeof(EntityID);
			dsea->impUseLength = sizeof(EntityID);
		}
		eid = (EntityID *)dsea->impUse;
		memset(eid, 0, sizeof(EntityID));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		/* NOTE(review): stored without cpu_to_le32, but read back with
		   le32_to_cpu in udf_fill_inode — verify endian handling. */
		dsea->majorDeviceIdent = kdev_t_to_nr(inode->i_rdev) >> 8;
		dsea->minorDeviceIdent = kdev_t_to_nr(inode->i_rdev) & 0xFF;
		mark_buffer_dirty(tbh, 1);
		udf_release_data(tbh);
	}

	if (UDF_I_EXTENDED_FE(inode) == 0)
	{
		/* Plain FileEntry: blocks recorded, rounded up to whole
		   filesystem blocks (i_blocks is in 512-byte sectors). */
		fe->logicalBlocksRecorded = cpu_to_le64(
			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
			(inode->i_sb->s_blocksize_bits - 9));

		if (udf_time_to_stamp(&cpu_time, inode->i_atime, UDF_I_UATIME(inode)))
			fe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime, UDF_I_UMTIME(inode)))
			fe->modificationTime = cpu_to_lets(cpu_time);
		memset(&(fe->impIdent), 0, sizeof(EntityID));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		/* NOTE(review): le16_to_cpu here should read cpu_to_le16 —
		   identical in effect on all arches, but the intent is a
		   host-to-disc conversion. */
		fe->descTag.tagIdent = le16_to_cpu(TID_FILE_ENTRY);
		crclen = sizeof(struct FileEntry);
	}
	else
	{
		/* ExtendedFileEntry: additionally records a creation time.
		   NOTE(review): the rounding term is (2 << ...) here but
		   (1 << ...) in the FileEntry branch above — looks like a
		   typo; confirm which rounding is intended. */
		efe->logicalBlocksRecorded = cpu_to_le64(
			(inode->i_blocks + (2 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
			(inode->i_sb->s_blocksize_bits - 9));

		if (udf_time_to_stamp(&cpu_time, inode->i_atime, UDF_I_UATIME(inode)))
			efe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime, UDF_I_UMTIME(inode)))
			efe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime, UDF_I_UCTIME(inode)))
			efe->createTime = cpu_to_lets(cpu_time);
		memset(&(efe->impIdent), 0, sizeof(EntityID));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		efe->descTag.tagIdent = le16_to_cpu(TID_EXTENDED_FILE_ENTRY);
		crclen = sizeof(struct ExtendedFileEntry);
	}

	if (UDF_I_STRAT4096(inode))
	{
		/* Strategy 4096: write-once media ICB hierarchy. */
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	}
	else
	{
		/* Strategy 4: direct ICB. */
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	/* Map the Unix file type onto the UDF ICB file type. */
	if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = FILE_TYPE_FIFO;

	/* Rebuild the ICB flags: preserve every on-disc flag except the
	   allocation-type mask and set[ug]id/sticky, which mirror i_mode. */
	icbflags =	UDF_I_ALLOCTYPE(inode) |
			((inode->i_mode & S_ISUID) ? ICB_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICB_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICB_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
			~(ICB_FLAG_ALLOC_MASK | ICB_FLAG_SETUID |
			  ICB_FLAG_SETGID | ICB_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
	/* CRC covers everything after the tag, including extended
	   attributes and allocation descriptors. */
	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));

	/* Tag checksum: byte-sum of the tag, skipping the checksum byte
	   itself (offset 4). */
	fe->descTag.tagChecksum = 0;
	for (i=0; i<16; i++)
		if (i != 4)
			fe->descTag.tagChecksum += ((Uint8 *)&(fe->descTag))[i];

	/* write the data blocks */
	mark_buffer_dirty(bh, 1);
	if (do_sync)
	{
		ll_rw_block(WRITE, 1, &bh);
		wait_on_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh))
		{
			printk("IO error syncing udf inode [%s:%08lx]\n",
				bdevname(inode->i_dev), inode->i_ino);
			err = -EIO;
		}
	}
	udf_release_data(bh);
	return err;
}
1406 * udf_iget
1408 * PURPOSE
1409 * Get an inode.
1411 * DESCRIPTION
1412 * This routine replaces iget() and read_inode().
1414 * HISTORY
1415 * October 3, 1997 - Andrew E. Mileski
1416 * Written, tested, and released.
1418 * 12/19/98 dgb Added semaphore and changed to be a wrapper of iget
1420 struct inode *
1421 udf_iget(struct super_block *sb, lb_addr ino)
1423 struct inode *inode;
1424 unsigned long block;
1426 block = udf_get_lb_pblock(sb, ino, 0);
1428 /* Get the inode */
1430 inode = iget(sb, block);
1431 /* calls udf_read_inode() ! */
1433 if (!inode)
1435 printk(KERN_ERR "udf: iget() failed\n");
1436 return NULL;
1438 else if (is_bad_inode(inode))
1440 iput(inode);
1441 return NULL;
1443 else if (UDF_I_LOCATION(inode).logicalBlockNum == 0xFFFFFFFF &&
1444 UDF_I_LOCATION(inode).partitionReferenceNum == 0xFFFF)
1446 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(lb_addr));
1447 __udf_read_inode(inode);
1450 if ( ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum) )
1452 udf_debug("block=%d, partition=%d out of range\n",
1453 ino.logicalBlockNum, ino.partitionReferenceNum);
1454 return NULL;
1457 return inode;
/*
 * Append a new allocation extent (eloc, elen) at *extoffset inside the
 * descriptor block *bloc.  If the current block cannot hold two more
 * descriptors, a new AllocExtDesc continuation block is allocated and
 * chained in first.  On return *bloc, *extoffset and *bh track the
 * (possibly new) write position.  Returns the extent type (elen >> 30)
 * on success or -1 on failure.
 */
int udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
	lb_addr eloc, Uint32 elen, struct buffer_head **bh, int inc)
{
	int adsize;
	short_ad *sad = NULL;
	long_ad *lad = NULL;
	struct AllocExtDesc *aed;
	int ret;

	/* Lazily read the descriptor block if the caller didn't pass it. */
	if (!(*bh))
	{
		if (!(*bh = udf_tread(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, *bloc, 0),
			inode->i_sb->s_blocksize)))
		{
			udf_debug("reading block %d failed!\n",
				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
			return -1;
		}
	}

	/* Descriptor size depends on the inode's allocation type. */
	if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return -1;

	/* Room must remain for this descriptor AND a possible chain
	   pointer; otherwise start a continuation block. */
	if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
	{
		char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		lb_addr obloc = *bloc;

		/* Allocate the continuation block near the current one. */
		if (!(bloc->logicalBlockNum = udf_new_block(inode,
			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
		{
			return -1;
		}
		/* NOTE(review): failure here leaves the freshly allocated
		   block unreferenced — verify whether it should be freed. */
		if (!(nbh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
			*bloc, 0), inode->i_sb->s_blocksize)))
		{
			return -1;
		}
		aed = (struct AllocExtDesc *)(nbh->b_data);
		aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
		if (*extoffset + adsize > inode->i_sb->s_blocksize)
		{
			/* Not even one descriptor fits: move the last one that
			   was written into the new block and chain after it. */
			loffset = *extoffset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = (*bh)->b_data + *extoffset - adsize;
			dptr = nbh->b_data + sizeof(struct AllocExtDesc);
			memcpy(dptr, sptr, adsize);
			*extoffset = sizeof(struct AllocExtDesc) + adsize;
		}
		else
		{
			/* One slot is left in the old block: use it for the
			   chain pointer and account for it there. */
			loffset = *extoffset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = (*bh)->b_data + *extoffset;
			*extoffset = sizeof(struct AllocExtDesc);

			if (memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
			{
				/* Old block is itself an AllocExtDesc. */
				aed = (struct AllocExtDesc *)(*bh)->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
			}
			else
			{
				/* Old block is the file entry itself. */
				UDF_I_LENALLOC(inode) += adsize;
				mark_inode_dirty(inode);
			}
		}
		udf_new_tag(nbh->b_data, TID_ALLOC_EXTENT_DESC, 2, 1,
			bloc->logicalBlockNum, sizeof(tag));
		/* Write the "next extent of alloc descs" pointer at sptr. */
		switch (UDF_I_ALLOCTYPE(inode))
		{
			case ICB_FLAG_AD_SHORT:
			{
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXTENT_NEXT_EXTENT_ALLOCDECS << 30 |
					inode->i_sb->s_blocksize);
				sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
				break;
			}
			case ICB_FLAG_AD_LONG:
			{
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXTENT_NEXT_EXTENT_ALLOCDECS << 30 |
					inode->i_sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(*bloc);
				break;
			}
		}
		udf_update_tag((*bh)->b_data, loffset);
		mark_buffer_dirty(*bh, 1);
		udf_release_data(*bh);
		*bh = nbh;
	}

	/* Now there is room: write the actual extent. */
	ret = udf_write_aext(inode, *bloc, extoffset, eloc, elen, bh, inc);

	/* Account the new descriptor in the file entry or the AED. */
	if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
	{
		UDF_I_LENALLOC(inode) += adsize;
		mark_inode_dirty(inode);
	}
	else
	{
		aed = (struct AllocExtDesc *)(*bh)->b_data;
		aed->lengthAllocDescs =
			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
		udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
		mark_buffer_dirty(*bh, 1);
	}

	return ret;
}
/*
 * Overwrite the allocation descriptor at *extoffset in block bloc with
 * (eloc, elen), in the inode's short/long AD format, then update the
 * containing descriptor's tag (or dirty the inode when the descriptor
 * lives in the file entry).  Advances *extoffset when inc is set.
 * Returns the extent type (elen >> 30) or -1 on failure.
 */
int udf_write_aext(struct inode *inode, lb_addr bloc, int *extoffset,
	lb_addr eloc, Uint32 elen, struct buffer_head **bh, int inc)
{
	int adsize;
	short_ad *sad = NULL;
	long_ad *lad = NULL;

	/* Lazily read the descriptor block if the caller didn't pass it. */
	if (!(*bh))
	{
		if (!(*bh = udf_tread(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, bloc, 0),
			inode->i_sb->s_blocksize)))
		{
			udf_debug("reading block %d failed!\n",
				udf_get_lb_pblock(inode->i_sb, bloc, 0));
			return -1;
		}
	}

	if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return -1;

	/* Store the descriptor in little-endian on-disc form. */
	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICB_FLAG_AD_SHORT:
		{
			sad = (short_ad *)((*bh)->b_data + *extoffset);
			sad->extLength = cpu_to_le32(elen);
			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
			break;
		}
		case ICB_FLAG_AD_LONG:
		{
			lad = (long_ad *)((*bh)->b_data + *extoffset);
			lad->extLength = cpu_to_le32(elen);
			lad->extLocation = cpu_to_lelb(eloc);
			break;
		}
	}

	if (memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
	{
		/* Descriptor sits in an AllocExtDesc block: refresh its tag. */
		struct AllocExtDesc *aed = (struct AllocExtDesc *)(*bh)->b_data;
		udf_update_tag((*bh)->b_data,
			le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct AllocExtDesc));
	}
	else
		mark_inode_dirty(inode);

	mark_buffer_dirty(*bh, 1);

	if (inc)
		*extoffset += adsize;
	return (elen >> 30);
}
/*
 * Fetch the next allocation extent into *eloc/*elen, transparently
 * following EXTENT_NEXT_EXTENT_ALLOCDECS chain pointers into
 * continuation blocks (via tail recursion).  *bloc/*extoffset/*bh are
 * the walk cursor; a zero *extoffset is (re)initialized to the first
 * descriptor.  Returns the extent type, or -1 on error/end/empty extent.
 */
int udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
	lb_addr *eloc, Uint32 *elen, struct buffer_head **bh, int inc)
{
	int pos, alen;
	Uint8 etype;

	/* Lazily read the current descriptor block. */
	if (!(*bh))
	{
		if (!(*bh = udf_tread(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, *bloc, 0),
			inode->i_sb->s_blocksize)))
		{
			udf_debug("reading block %d failed!\n",
				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
			return -1;
		}
	}

	/* Descriptors live either in the file entry itself or in an
	   AllocExtDesc continuation block; compute start and end offsets. */
	if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
	{
		pos = udf_file_entry_alloc_offset(inode);
		alen = UDF_I_LENALLOC(inode) + pos;
	}
	else
	{
		struct AllocExtDesc *aed = (struct AllocExtDesc *)(*bh)->b_data;

		pos = sizeof(struct AllocExtDesc);
		alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
	}

	if (!(*extoffset))
		*extoffset = pos;

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICB_FLAG_AD_SHORT:
		{
			short_ad *sad;

			if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
				return -1;

			if ((etype = le32_to_cpu(sad->extLength) >> 30) == EXTENT_NEXT_EXTENT_ALLOCDECS)
			{
				/* Chain pointer: restart the walk in the
				   continuation block. */
				bloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
				*extoffset = 0;
				udf_release_data(*bh);
				*bh = NULL;
				return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
			}
			else
			{
				/* short_ads carry no partition; inherit the
				   inode's. */
				eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
				eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
				*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
			}
			break;
		}
		case ICB_FLAG_AD_LONG:
		{
			long_ad *lad;

			if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
				return -1;

			if ((etype = le32_to_cpu(lad->extLength) >> 30) == EXTENT_NEXT_EXTENT_ALLOCDECS)
			{
				*bloc = lelb_to_cpu(lad->extLocation);
				*extoffset = 0;
				udf_release_data(*bh);
				*bh = NULL;
				return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
			}
			else
			{
				*eloc = lelb_to_cpu(lad->extLocation);
				*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
			}
			break;
		}
		case ICB_FLAG_AD_IN_ICB:
		{
			/* Data embedded in the ICB: the single "extent" is the
			   file entry block itself. */
			if (UDF_I_LENALLOC(inode) == 0)
				return -1;
			etype = EXTENT_RECORDED_ALLOCATED;
			*eloc = UDF_I_LOCATION(inode);
			*elen = UDF_I_LENALLOC(inode);
			break;
		}
		default:
		{
			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
			return -1;
		}
	}
	if (*elen)
		return etype;

	/* Zero-length extent: report it and step the cursor back so the
	   caller can overwrite it. */
	udf_debug("Empty Extent, inode=%ld, alloctype=%d, eloc=%d, elen=%d, etype=%d, extoffset=%d\n",
		inode->i_ino, UDF_I_ALLOCTYPE(inode), eloc->logicalBlockNum, *elen, etype, *extoffset);
	if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
		*extoffset -= sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
		*extoffset -= sizeof(long_ad);
	return -1;
}
/*
 * Like udf_next_aext(), but return the descriptor at the cursor as-is:
 * chain pointers (EXTENT_NEXT_EXTENT_ALLOCDECS) are NOT followed, and
 * extended attributes are skipped via an explicit offset rather than
 * udf_file_entry_alloc_offset().  Returns the extent type or -1.
 */
int udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
	lb_addr *eloc, Uint32 *elen, struct buffer_head **bh, int inc)
{
	int pos, alen;
	Uint8 etype;

	/* Lazily read the current descriptor block. */
	if (!(*bh))
	{
		if (!(*bh = udf_tread(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, *bloc, 0),
			inode->i_sb->s_blocksize)))
		{
			udf_debug("reading block %d failed!\n",
				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
			return -1;
		}
	}

	/* Compute descriptor area start/end for the file entry or an
	   AllocExtDesc continuation block. */
	if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
	{
		if (!(UDF_I_EXTENDED_FE(inode)))
			pos = sizeof(struct FileEntry) + UDF_I_LENEATTR(inode);
		else
			pos = sizeof(struct ExtendedFileEntry) + UDF_I_LENEATTR(inode);
		alen = UDF_I_LENALLOC(inode) + pos;
	}
	else
	{
		struct AllocExtDesc *aed = (struct AllocExtDesc *)(*bh)->b_data;

		pos = sizeof(struct AllocExtDesc);
		alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
	}

	if (!(*extoffset))
		*extoffset = pos;

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICB_FLAG_AD_SHORT:
		{
			short_ad *sad;

			if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(sad->extLength) >> 30;
			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
			/* short_ads carry no partition; inherit the inode's. */
			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		case ICB_FLAG_AD_LONG:
		{
			long_ad *lad;

			if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(lad->extLength) >> 30;
			*eloc = lelb_to_cpu(lad->extLocation);
			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		default:
		{
			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
			return -1;
		}
	}
	if (*elen)
		return etype;

	/* Zero-length extent: step the cursor back so it can be rewritten. */
	udf_debug("Empty Extent!\n");
	if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
		*extoffset -= sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
		*extoffset -= sizeof(long_ad);
	return -1;
}
/*
 * Insert extent (neloc, nelen) at position (bloc, extoffset), shifting
 * every following descriptor down by one slot: each iteration writes the
 * pending extent over the current slot and carries the old contents
 * forward, then the last one is appended with udf_add_aext().
 * Returns the type of the extent that ends up appended (nelen >> 30).
 */
int udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
	lb_addr neloc, Uint32 nelen, struct buffer_head *bh)
{
	lb_addr oeloc;
	Uint32 oelen;
	int type;

	if (!bh)
	{
		if (!(bh = udf_tread(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, bloc, 0),
			inode->i_sb->s_blocksize)))
		{
			udf_debug("reading block %d failed!\n",
				udf_get_lb_pblock(inode->i_sb, bloc, 0));
			return -1;
		}
	}
	else
		/* Take our own reference; udf_release_data() below drops it. */
		atomic_inc(&bh->b_count);

	/* Read with inc=0 so the subsequent write lands on the same slot;
	   udf_write_aext() then advances the cursor. */
	while ((type = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
	{
		udf_write_aext(inode, bloc, &extoffset, neloc, nelen, &bh, 1);

		neloc = oeloc;
		nelen = (type << 30) | oelen;
	}
	udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
	udf_release_data(bh);
	return (nelen >> 30);
}
/*
 * Delete the extent at (nbloc, nextoffset) by copying every following
 * extent one slot back (two cursors: "o" trails "n" by one descriptor),
 * then blanking the freed trailing slot(s) and shrinking the allocation
 * descriptor length.  If the walk crossed into a continuation block that
 * becomes redundant, that block is freed.  Returns elen >> 30.
 */
int udf_delete_aext(struct inode *inode, lb_addr nbloc, int nextoffset,
	lb_addr eloc, Uint32 elen, struct buffer_head *nbh)
{
	struct buffer_head *obh;
	lb_addr obloc;
	int oextoffset, adsize;
	int type;
	struct AllocExtDesc *aed;

	if (!(nbh))
	{
		if (!(nbh = udf_tread(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, nbloc, 0),
			inode->i_sb->s_blocksize)))
		{
			udf_debug("reading block %d failed!\n",
				udf_get_lb_pblock(inode->i_sb, nbloc, 0));
			return -1;
		}
	}
	else
		atomic_inc(&nbh->b_count);
	/* Second reference: obh starts out aliasing nbh, and both are
	   released separately at the end of this function. */
	atomic_inc(&nbh->b_count);

	if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		adsize = 0;

	obh = nbh;
	obloc = nbloc;
	oextoffset = nextoffset;

	/* Skip the extent being deleted; the "n" cursor is now one slot
	   ahead of the "o" cursor. */
	if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
		return -1;

	/* Copy each following extent back into the previous slot. */
	while ((type = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
	{
		udf_write_aext(inode, obloc, &oextoffset, eloc, (type << 30) | elen, &obh, 1);
		if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
		{
			/* The read cursor crossed into a new continuation
			   block; move the write cursor's buffer along too. */
			obloc = nbloc;
			udf_release_data(obh);
			atomic_inc(&nbh->b_count);
			obh = nbh;
			oextoffset = nextoffset - adsize;
		}
	}
	/* Blank descriptor used to overwrite the now-stale trailing slots. */
	memset(&eloc, 0x00, sizeof(lb_addr));
	elen = 0;

	if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
	{
		/* Read and write cursors ended in different blocks: the last
		   continuation block is now unused — free it, and clear the
		   chain pointer plus the vacated slot (two descriptors). */
		udf_free_blocks(inode, nbloc, 0, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, &obh, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, &obh, 1);
		if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
		{
			UDF_I_LENALLOC(inode) -= (adsize * 2);
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct AllocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
			udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
			mark_buffer_dirty(obh, 1);
		}
	}
	else
	{
		/* Same block: only the single vacated slot needs clearing. */
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, &obh, 1);
		if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
		{
			UDF_I_LENALLOC(inode) -= adsize;
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct AllocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
			udf_update_tag((obh)->b_data, oextoffset - adsize);
			mark_buffer_dirty(obh, 1);
		}
	}

	udf_release_data(nbh);
	udf_release_data(obh);
	return (elen >> 30);
}
/*
 * Walk the inode's extent list until the extent covering logical block
 * `block` is found.  On success the covering extent is returned through
 * *eloc/*elen, *offset is the block's index within that extent, and the
 * walk cursor (*bloc, *extoffset, *bh) is left positioned after it.
 * Returns the extent type, or -1 if the block lies past the last extent
 * (then *offset is the distance past the end, for the allocator).
 */
int inode_bmap(struct inode *inode, int block, lb_addr *bloc, Uint32 *extoffset,
	lb_addr *eloc, Uint32 *elen, Uint32 *offset, struct buffer_head **bh)
{
	int etype, lbcount = 0;

	if (block < 0)
	{
		printk(KERN_ERR "udf: inode_bmap: block < 0\n");
		return -1;
	}
	if (!inode)
	{
		printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
		return -1;
	}

	/* Start the walk at the file entry itself. */
	*extoffset = 0;
	*elen = 0;
	*bloc = UDF_I_LOCATION(inode);

	do
	{
		if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
		{
			/* Ran off the end: report how far past it we are. */
			*offset = block - lbcount;
			return -1;
		}
		/* Extent lengths are in bytes; count whole fs blocks. */
		lbcount += ((*elen + inode->i_sb->s_blocksize - 1) >>
			inode->i_sb->s_blocksize_bits);
	} while (lbcount <= block);

	/* Index of `block` within the extent just consumed. */
	*offset = block + ((*elen + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits) - lbcount;

	return etype;
}
1997 long udf_locked_block_map(struct inode *inode, long block)
1999 lb_addr eloc, bloc;
2000 Uint32 offset, extoffset, elen;
2001 struct buffer_head *bh = NULL;
2002 int ret;
2004 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == EXTENT_RECORDED_ALLOCATED)
2005 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
2006 else
2007 ret = 0;
2009 if (bh)
2010 udf_release_data(bh);
2012 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2013 return udf_fixed_to_variable(ret);
2014 else
2015 return ret;
/* BKL-taking wrapper around udf_locked_block_map(). */
long udf_block_map(struct inode *inode, long block)
{
	long result;

	lock_kernel();
	result = udf_locked_block_map(inode, block);
	unlock_kernel();

	return result;
}