1 /*
2 * inode.c
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
7 * CONTACTS
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hootie.lvld.hp.com
12 * COPYRIGHT
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2000 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
22 * HISTORY
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
36 #include "udfdecl.h"
37 #include <linux/locks.h>
38 #include <linux/mm.h>
39 #include <linux/smp_lock.h>
41 #include "udf_i.h"
42 #include "udf_sb.h"
44 #define EXTENT_MERGE_SIZE 5
46 static mode_t udf_convert_permissions(struct FileEntry *);
47 static int udf_update_inode(struct inode *, int);
48 static void udf_fill_inode(struct inode *, struct buffer_head *);
49 static struct buffer_head *inode_getblk(struct inode *, long, int *, long *, int *);
50 static void udf_split_extents(struct inode *, int *, int, int,
51 long_ad [EXTENT_MERGE_SIZE], int *);
52 static void udf_prealloc_extents(struct inode *, int, int,
53 long_ad [EXTENT_MERGE_SIZE], int *);
54 static void udf_merge_extents(struct inode *,
55 long_ad [EXTENT_MERGE_SIZE], int *);
56 static void udf_update_extents(struct inode *,
57 long_ad [EXTENT_MERGE_SIZE], int, int,
58 lb_addr, Uint32, struct buffer_head **);
59 static int udf_get_block(struct inode *, long, struct buffer_head *, int);
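/*
 * Note on the extent bookkeeping used throughout this file: an extent's
 * type lives in the top two bits of extLength and its byte length in the
 * low 30 bits (UDF_EXTENT_LENGTH_MASK).  A minimal sketch, using the same
 * names the code below uses:
 *
 *	laarr[c].extLength = (EXTENT_RECORDED_ALLOCATED << 30) | elen;
 *	etype = laarr[c].extLength >> 30;
 *	elen  = laarr[c].extLength & UDF_EXTENT_LENGTH_MASK;
 */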
62 * udf_put_inode
64 * PURPOSE
66 * DESCRIPTION
67 * This routine is called whenever the kernel no longer needs the inode.
69 * HISTORY
70 * July 1, 1997 - Andrew E. Mileski
71 * Written, tested, and released.
73 * Called at each iput()
75 void udf_put_inode(struct inode * inode)
77 udf_discard_prealloc(inode);
81 * udf_delete_inode
83 * PURPOSE
84 * Clean-up before the specified inode is destroyed.
86 * DESCRIPTION
87 * This routine is called when the kernel destroys an inode structure
88 * i.e. when iput() finds i_count == 0.
90 * HISTORY
91 * July 1, 1997 - Andrew E. Mileski
92 * Written, tested, and released.
94 * Called at the last iput() if i_nlink is zero.
96 void udf_delete_inode(struct inode * inode)
98 inode->i_size = 0;
99 if (inode->i_blocks)
100 udf_truncate(inode);
101 udf_free_inode(inode);
104 void udf_discard_prealloc(struct inode * inode)
106 if (inode->i_size && UDF_I_ALLOCTYPE(inode) != ICB_FLAG_AD_IN_ICB)
107 udf_trunc(inode);
110 static int udf_alloc_block(struct inode *inode, Uint16 partition,
111 Uint32 goal, int *err)
113 int result = 0;
114 wait_on_super(inode->i_sb);
116 result = udf_new_block(inode, partition, goal, err);
118 return result;
121 static int udf_writepage(struct dentry *dentry, struct page *page)
123 return block_write_full_page(page,udf_get_block);
125 static int udf_readpage(struct dentry *dentry, struct page *page)
127 return block_read_full_page(page,udf_get_block);
129 static int udf_prepare_write(struct page *page, unsigned from, unsigned to)
131 return block_prepare_write(page,from,to,udf_get_block);
133 static int udf_bmap(struct address_space *mapping, long block)
135 return generic_block_bmap(mapping,block,udf_get_block);
137 static struct address_space_operations udf_aops = {
138 readpage: udf_readpage,
139 writepage: udf_writepage,
140 prepare_write: udf_prepare_write,
141 commit_write: generic_commit_write,
142 bmap: udf_bmap
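/*
 * The page-cache helpers above all map file blocks through udf_get_block();
 * udf_aops is installed for normal files in udf_fill_inode() and again in
 * udf_expand_file_adinicb() once a file stops using in-ICB allocation.
 */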
145 void udf_expand_file_adinicb(struct file * filp, int newsize, int * err)
147 struct inode * inode = filp->f_dentry->d_inode;
148 struct buffer_head *bh = NULL;
149 struct page *page;
150 unsigned long kaddr = 0;
152 /* from now on we have normal address_space methods */
153 inode->i_data.a_ops = &udf_aops;
155 if (!UDF_I_LENALLOC(inode))
157 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
158 mark_inode_dirty(inode);
159 return;
162 bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
163 if (!bh)
164 return;
165 page = grab_cache_page(&inode->i_data, 0);
166 if (!PageLocked(page))
167 BUG();
168 if (!Page_Uptodate(page))
170 kaddr = kmap(page);
171 memset((char *)kaddr + UDF_I_LENALLOC(inode), 0x00,
172 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
173 memcpy((char *)kaddr, bh->b_data + udf_file_entry_alloc_offset(inode),
174 UDF_I_LENALLOC(inode));
175 kunmap(page);
177 memset(bh->b_data + udf_file_entry_alloc_offset(inode),
178 0, UDF_I_LENALLOC(inode));
179 UDF_I_LENALLOC(inode) = 0;
180 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
181 inode->i_blocks = inode->i_sb->s_blocksize / 512;
182 mark_buffer_dirty(bh, 1);
183 udf_release_data(bh);
185 inode->i_data.a_ops->writepage(filp->f_dentry, page);
186 UnlockPage(page);
187 page_cache_release(page);
189 mark_inode_dirty(inode);
190 inode->i_version ++;
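/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory that keeps its entries inside the ICB into one backed
 * by a real block: allocate a block, copy the file identifiers into it and
 * leave a single long_ad in the file entry pointing at the new block.
 * Returns the buffer for the new directory block (a summary of the code
 * below, not a spec).
 */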
193 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
195 long_ad newad;
196 int newblock;
197 struct buffer_head *sbh = NULL, *dbh = NULL;
199 struct udf_fileident_bh sfibh, dfibh;
200 loff_t f_pos = udf_ext0_offset(inode) >> 2;
201 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
202 struct FileIdentDesc cfi, *sfi, *dfi;
204 if (!inode->i_size)
206 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
207 mark_inode_dirty(inode);
208 return NULL;
211 /* alloc block, and copy data to it */
212 *block = udf_alloc_block(inode,
213 UDF_I_LOCATION(inode).partitionReferenceNum,
214 UDF_I_LOCATION(inode).logicalBlockNum, err);
216 if (!(*block))
217 return NULL;
218 newblock = udf_get_pblock(inode->i_sb, *block,
219 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
220 if (!newblock)
221 return NULL;
222 sbh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
223 if (!sbh)
224 return NULL;
225 dbh = udf_tread(inode->i_sb, newblock, inode->i_sb->s_blocksize);
226 if (!dbh)
227 return NULL;
229 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
230 sfibh.sbh = sfibh.ebh = sbh;
231 dfibh.soffset = dfibh.eoffset = 0;
232 dfibh.sbh = dfibh.ebh = dbh;
233 while ( (f_pos < size) )
235 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
236 if (!sfi)
238 udf_release_data(sbh);
239 udf_release_data(dbh);
240 return NULL;
242 sfi->descTag.tagLocation = *block;
243 dfibh.soffset = dfibh.eoffset;
244 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
245 dfi = (struct FileIdentDesc *)(dbh->b_data + dfibh.soffset);
246 if (udf_write_fi(sfi, dfi, &dfibh, sfi->impUse,
247 sfi->fileIdent + sfi->lengthOfImpUse))
249 udf_release_data(sbh);
250 udf_release_data(dbh);
251 return NULL;
254 mark_buffer_dirty(dbh, 1);
256 memset(sbh->b_data + udf_file_entry_alloc_offset(inode),
257 0, UDF_I_LENALLOC(inode));
259 memset(&newad, 0x00, sizeof(long_ad));
260 newad.extLength = inode->i_size;
261 newad.extLocation.logicalBlockNum = *block;
262 newad.extLocation.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
263 /* UniqueID stuff */
265 memcpy(sbh->b_data + udf_file_entry_alloc_offset(inode),
266 &newad, sizeof(newad));
268 UDF_I_LENALLOC(inode) = sizeof(newad);
269 UDF_I_ALLOCTYPE(inode) = ICB_FLAG_AD_LONG;
270 inode->i_blocks = inode->i_sb->s_blocksize / 512;
271 mark_buffer_dirty(sbh, 1);
272 udf_release_data(sbh);
273 mark_inode_dirty(inode);
274 inode->i_version ++;
275 return dbh;
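/*
 * udf_get_block
 *
 * get_block callback used by the address_space operations above: map a file
 * block to a device block in bh_result, allocating through inode_getblk()
 * when create is set.
 */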
278 static int udf_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create)
280 int err, new;
281 struct buffer_head *bh;
282 unsigned long phys;
284 if (!create)
286 phys = udf_block_map(inode, block);
287 if (phys)
289 bh_result->b_dev = inode->i_dev;
290 bh_result->b_blocknr = phys;
291 bh_result->b_state |= (1UL << BH_Mapped);
293 return 0;
296 err = -EIO;
297 new = 0;
298 bh = NULL;
300 lock_kernel();
302 if (block < 0)
303 goto abort_negative;
305 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
307 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
308 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
311 err = 0;
313 bh = inode_getblk(inode, block, &err, &phys, &new);
314 if (bh)
315 BUG();
316 if (err)
317 goto abort;
318 if (!phys)
319 BUG();
321 bh_result->b_dev = inode->i_dev;
322 bh_result->b_blocknr = phys;
323 bh_result->b_state |= (1UL << BH_Mapped);
324 if (new)
325 bh_result->b_state |= (1UL << BH_New);
326 abort:
327 unlock_kernel();
328 return err;
330 abort_negative:
331 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
332 goto abort;
335 struct buffer_head * udf_getblk(struct inode * inode, long block,
336 int create, int * err)
338 struct buffer_head dummy;
339 int error;
341 dummy.b_state = 0;
342 dummy.b_blocknr = -1000;
343 error = udf_get_block(inode, block, &dummy, create);
344 *err = error;
345 if (!error && buffer_mapped(&dummy))
347 struct buffer_head *bh;
348 bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
349 if (buffer_new(&dummy))
351 if (!buffer_uptodate(bh))
352 wait_on_buffer(bh);
353 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
354 mark_buffer_uptodate(bh, 1);
355 mark_buffer_dirty(bh, 1);
357 return bh;
359 return NULL;
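/*
 * inode_getblk
 *
 * Core of block allocation: walk the allocation extents to the one covering
 * the requested block, allocate a new block if none is recorded, then split,
 * optionally preallocate and merge the surrounding extents before writing
 * them back.  The physical block is returned through *phys and *new is set
 * when the block was freshly allocated.
 */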
362 static struct buffer_head * inode_getblk(struct inode * inode, long block,
363 int *err, long *phys, int *new)
365 struct buffer_head *pbh = NULL, *cbh = NULL, *result = NULL;
366 long_ad laarr[EXTENT_MERGE_SIZE];
367 Uint32 pextoffset = 0, cextoffset = 0, nextoffset = 0;
368 int count = 0, startnum = 0, endnum = 0;
369 Uint32 elen = 0;
370 lb_addr eloc, pbloc = UDF_I_LOCATION(inode), cbloc = UDF_I_LOCATION(inode);
371 int c = 1;
372 int lbcount = 0, b_off = 0, offset = 0;
373 Uint32 newblocknum, newblock;
374 char etype;
375 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
376 char lastblock = 0;
378 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
379 b_off = block << inode->i_sb->s_blocksize_bits;
380 pbloc = cbloc = UDF_I_LOCATION(inode);
382 /* find the extent which contains the block we are looking for,
383 alternating between laarr[0] and laarr[1] for the locations of
384 the current and previous extents */
387 if (pbh != cbh)
389 udf_release_data(pbh);
390 pbh = cbh;
391 atomic_inc(&cbh->b_count);
392 pbloc = cbloc;
395 lbcount += elen;
397 pextoffset = cextoffset;
398 cextoffset = nextoffset;
400 if ((etype = udf_next_aext(inode, &cbloc, &nextoffset, &eloc, &elen, &cbh, 1)) == -1)
401 break;
403 c = !c;
405 laarr[c].extLength = (etype << 30) | elen;
406 laarr[c].extLocation = eloc;
408 if (etype != EXTENT_NOT_RECORDED_NOT_ALLOCATED)
409 pgoal = eloc.logicalBlockNum +
410 ((elen + inode->i_sb->s_blocksize - 1) >>
411 inode->i_sb->s_blocksize_bits);
413 count ++;
414 } while (lbcount + elen <= b_off);
416 b_off -= lbcount;
417 offset = b_off >> inode->i_sb->s_blocksize_bits;
419 /* if the extent is allocated and recorded, return the block;
420 if the extent is not a multiple of the blocksize, round up */
422 if (etype == EXTENT_RECORDED_ALLOCATED)
424 if (elen & (inode->i_sb->s_blocksize - 1))
426 elen = (EXTENT_RECORDED_ALLOCATED << 30) |
427 ((elen + inode->i_sb->s_blocksize - 1) &
428 ~(inode->i_sb->s_blocksize - 1));
429 etype = udf_write_aext(inode, cbloc, &cextoffset, eloc, elen, &cbh, 1);
431 udf_release_data(pbh);
432 udf_release_data(cbh);
433 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
434 *phys = newblock;
435 return NULL;
438 if (etype == -1)
440 endnum = startnum = ((count > 1) ? 1 : count);
441 c = !c;
442 laarr[c].extLength = (EXTENT_NOT_RECORDED_NOT_ALLOCATED << 30) |
443 ((offset + 1) << inode->i_sb->s_blocksize_bits);
444 memset(&laarr[c].extLocation, 0x00, sizeof(lb_addr));
445 count ++;
446 endnum ++;
447 lastblock = 1;
449 else
450 endnum = startnum = ((count > 2) ? 2 : count);
452 /* if the current extent is in position 0, swap it with the previous */
453 if (!c && count != 1)
455 laarr[2] = laarr[0];
456 laarr[0] = laarr[1];
457 laarr[1] = laarr[2];
458 c = 1;
462 /* if the current block is located in an extent, read the next extent */
462 if (etype != -1)
464 if ((etype = udf_next_aext(inode, &cbloc, &nextoffset, &eloc, &elen, &cbh, 0)) != -1)
466 laarr[c+1].extLength = (etype << 30) | elen;
467 laarr[c+1].extLocation = eloc;
468 count ++;
469 startnum ++;
470 endnum ++;
472 else
473 lastblock = 1;
475 udf_release_data(cbh);
477 *err = -EFBIG;
479 /* Check file limits.. */
481 unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
482 if (limit < RLIM_INFINITY)
484 limit >>= inode->i_sb->s_blocksize_bits;
485 if (block >= limit)
487 send_sig(SIGXFSZ, current, 0);
488 *err = -EFBIG;
489 return NULL;
494 /* if the current extent is not recorded but allocated, get the
495 block in the extent corresponding to the requested block */
496 if ((laarr[c].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED)
497 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
498 else /* otherwise, allocate a new block */
500 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
501 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
503 if (!goal)
505 if (!(goal = pgoal))
506 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
509 if (!(newblocknum = udf_alloc_block(inode,
510 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
512 udf_release_data(pbh);
513 *err = -ENOSPC;
514 return NULL;
518 /* if the extent the requested block is located in contains multiple blocks,
519 split the extent into at most three extents. blocks prior to requested
520 block, requested block, and blocks after requested block */
521 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
523 #ifdef UDF_PREALLOCATE
524 /* preallocate blocks */
525 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
526 #endif
528 /* merge any contiguous extents in laarr */
529 udf_merge_extents(inode, laarr, &endnum);
531 /* write back the new extents, inserting new extents if the new number
532 of extents is greater than the old number, and deleting extents if
533 the new number of extents is less than the old number */
534 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
536 udf_release_data(pbh);
538 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
539 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
541 return NULL;
543 *phys = newblock;
544 *err = 0;
545 *new = 1;
546 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
547 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
548 inode->i_ctime = CURRENT_TIME;
549 UDF_I_UCTIME(inode) = CURRENT_UTIME;
550 inode->i_blocks += inode->i_sb->s_blocksize / 512;
551 if (IS_SYNC(inode))
552 udf_sync_inode(inode);
553 else
554 mark_inode_dirty(inode);
555 return result;
558 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
559 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
561 if ((laarr[*c].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED ||
562 (laarr[*c].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED)
564 int curr = *c;
565 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
566 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
567 int type = laarr[curr].extLength & ~UDF_EXTENT_LENGTH_MASK;
569 if (blen == 1)
571 else if (!offset || blen == offset + 1)
573 laarr[curr+2] = laarr[curr+1];
574 laarr[curr+1] = laarr[curr];
576 else
578 laarr[curr+3] = laarr[curr+1];
579 laarr[curr+2] = laarr[curr+1] = laarr[curr];
582 if (offset)
584 laarr[curr].extLength = type |
585 (offset << inode->i_sb->s_blocksize_bits);
586 curr ++;
587 (*c) ++;
588 (*endnum) ++;
591 laarr[curr].extLocation.logicalBlockNum = newblocknum;
592 if ((type >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED)
593 laarr[curr].extLocation.partitionReferenceNum =
594 UDF_I_LOCATION(inode).partitionReferenceNum;
595 laarr[curr].extLength = (EXTENT_RECORDED_ALLOCATED << 30) |
596 inode->i_sb->s_blocksize;
597 curr ++;
599 if (blen != offset + 1)
601 if ((type >> 30) == EXTENT_NOT_RECORDED_ALLOCATED)
602 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
603 laarr[curr].extLength = type |
604 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
605 curr ++;
606 (*endnum) ++;
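/*
 * udf_prealloc_extents
 *
 * Best-effort preallocation: grab up to UDF_DEFAULT_PREALLOC_BLOCKS past the
 * block just recorded and fold them into laarr as a not-recorded-but-allocated
 * extent, trimming any following unallocated extents that the preallocation
 * now covers (a rough summary of the logic below).
 */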
611 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
612 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
614 int start, length = 0, currlength = 0, i;
616 if (*endnum == (c+1) && !lastblock)
617 return;
619 if ((laarr[c+1].extLength >> 30) == EXTENT_NOT_RECORDED_ALLOCATED)
621 start = c+1;
622 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
623 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
625 else
626 start = c;
628 for (i=start+1; i<=*endnum; i++)
630 if (i == *endnum)
632 if (lastblock)
633 length += UDF_DEFAULT_PREALLOC_BLOCKS;
635 else if ((laarr[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED)
636 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
637 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
638 else
639 break;
642 if (length)
644 int next = laarr[start].extLocation.logicalBlockNum +
645 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
646 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
647 int numalloc = udf_alloc_blocks(inode,
648 laarr[start].extLocation.partitionReferenceNum,
649 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
650 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
652 if (numalloc)
654 if (start == (c+1))
655 laarr[start].extLength +=
656 (numalloc << inode->i_sb->s_blocksize_bits);
657 else
659 memmove(&laarr[c+2], &laarr[c+1],
660 sizeof(long_ad) * (*endnum - (c+1)));
661 (*endnum) ++;
662 laarr[c+1].extLocation.logicalBlockNum = next;
663 laarr[c+1].extLocation.partitionReferenceNum =
664 laarr[c].extLocation.partitionReferenceNum;
665 laarr[c+1].extLength = (EXTENT_NOT_RECORDED_ALLOCATED << 30) |
666 (numalloc << inode->i_sb->s_blocksize_bits);
667 start = c+1;
670 for (i=start+1; numalloc && i<*endnum; i++)
672 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
673 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
675 if (elen > numalloc)
677 laarr[c+1].extLength -=
678 (numalloc << inode->i_sb->s_blocksize_bits);
679 numalloc = 0;
681 else
683 numalloc -= elen;
684 if (*endnum > (i+1))
685 memmove(&laarr[i], &laarr[i+1],
686 sizeof(long_ad) * (*endnum - (i+1)));
687 i --;
688 (*endnum) --;
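/*
 * udf_merge_extents
 *
 * Coalesce neighbouring laarr entries that have the same extent type and are
 * physically contiguous, while respecting the 30-bit limit on a single
 * extent's length.
 */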
695 static void udf_merge_extents(struct inode *inode,
696 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
698 int i;
700 for (i=0; i<(*endnum-1); i++)
702 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
704 if (((laarr[i].extLength >> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED) ||
705 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
706 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
707 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
709 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
710 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
711 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
713 laarr[i+1].extLength = (laarr[i+1].extLength -
714 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
715 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
716 laarr[i].extLength = (UDF_EXTENT_LENGTH_MASK + 1) -
717 inode->i_sb->s_blocksize;
718 laarr[i+1].extLocation.logicalBlockNum =
719 laarr[i].extLocation.logicalBlockNum +
720 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
721 inode->i_sb->s_blocksize_bits);
723 else
725 laarr[i].extLength = laarr[i+1].extLength +
726 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
727 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
728 if (*endnum > (i+2))
729 memmove(&laarr[i+1], &laarr[i+2],
730 sizeof(long_ad) * (*endnum - (i+2)));
731 i --;
732 (*endnum) --;
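/*
 * udf_update_extents
 *
 * Write the first endnum entries of laarr back over the startnum extents
 * that were originally read, inserting extra descriptors when endnum is
 * larger and deleting surplus ones when it is smaller.
 */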
739 static void udf_update_extents(struct inode *inode,
740 long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
741 lb_addr pbloc, Uint32 pextoffset, struct buffer_head **pbh)
743 int start = 0, i;
744 lb_addr tmploc;
745 Uint32 tmplen;
747 if (startnum > endnum)
749 for (i=0; i<(startnum-endnum); i++)
751 udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
752 laarr[i].extLength, *pbh);
755 else if (startnum < endnum)
757 for (i=0; i<(endnum-startnum); i++)
759 udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
760 laarr[i].extLength, *pbh);
761 udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
762 &laarr[i].extLength, pbh, 1);
763 start ++;
767 for (i=start; i<endnum; i++)
769 udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
770 udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
771 laarr[i].extLength, pbh, 1);
775 struct buffer_head * udf_bread(struct inode * inode, int block,
776 int create, int * err)
778 struct buffer_head * bh = NULL;
779 int prev_blocks;
781 prev_blocks = inode->i_blocks;
783 bh = udf_getblk(inode, block, create, err);
784 if (!bh)
785 return NULL;
787 #if 0
788 if (create &&
789 S_ISDIR(inode->i_mode) &&
790 inode->i_blocks > prev_blocks)
792 int i;
793 struct buffer_head *tmp_bh = NULL;
795 for (i=1;
796 i < UDF_DEFAULT_PREALLOC_DIR_BLOCKS;
797 i++)
799 tmp_bh = udf_getblk(inode, block+i, create, err);
800 if (!tmp_bh)
802 udf_release_data(bh);
803 return 0;
805 udf_release_data(tmp_bh);
808 #endif
810 if (buffer_uptodate(bh))
811 return bh;
812 ll_rw_block(READ, 1, &bh);
813 wait_on_buffer(bh);
814 if (buffer_uptodate(bh))
815 return bh;
816 brelse(bh);
817 *err = -EIO;
818 return NULL;
822 * udf_read_inode
824 * PURPOSE
825 * Read an inode.
827 * DESCRIPTION
828 * This routine is called by iget() [which is called by udf_iget()]
829 * (clean_inode() will have been called first)
830 * when an inode is first read into memory.
832 * HISTORY
833 * July 1, 1997 - Andrew E. Mileski
834 * Written, tested, and released.
836 * 12/19/98 dgb Updated to fix size problems.
839 void
840 udf_read_inode(struct inode *inode)
842 memset(&UDF_I_LOCATION(inode), 0xFF, sizeof(lb_addr));
845 void
846 __udf_read_inode(struct inode *inode)
848 struct buffer_head *bh = NULL;
849 struct FileEntry *fe;
850 Uint16 ident;
853 * Set defaults, but the inode is still incomplete!
854 * Note: get_new_inode() sets the following on a new inode:
855 * i_sb = sb
856 * i_dev = sb->s_dev;
857 * i_no = ino
858 * i_flags = sb->s_flags
859 * i_state = 0
860 * clean_inode(): zero fills and sets
861 * i_count = 1
862 * i_nlink = 1
863 * i_op = NULL;
866 inode->i_blksize = PAGE_SIZE;
867 inode->i_version = 1;
869 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
871 if (!bh)
873 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
874 inode->i_ino);
875 make_bad_inode(inode);
876 return;
879 if (ident != TID_FILE_ENTRY && ident != TID_EXTENDED_FILE_ENTRY)
881 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
882 inode->i_ino, ident);
883 udf_release_data(bh);
884 make_bad_inode(inode);
885 return;
888 fe = (struct FileEntry *)bh->b_data;
890 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
892 struct buffer_head *ibh = NULL, *nbh = NULL;
893 struct IndirectEntry *ie;
895 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
896 if (ident == TID_INDIRECT_ENTRY)
898 if (ibh)
900 lb_addr loc;
901 ie = (struct IndirectEntry *)ibh->b_data;
903 loc = lelb_to_cpu(ie->indirectICB.extLocation);
905 if (ie->indirectICB.extLength &&
906 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
908 if (ident == TID_FILE_ENTRY ||
909 ident == TID_EXTENDED_FILE_ENTRY)
911 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(lb_addr));
912 udf_release_data(bh);
913 udf_release_data(ibh);
914 udf_release_data(nbh);
915 __udf_read_inode(inode);
916 return;
918 else
920 udf_release_data(nbh);
921 udf_release_data(ibh);
924 else
925 udf_release_data(ibh);
928 else
929 udf_release_data(ibh);
931 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
933 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
934 le16_to_cpu(fe->icbTag.strategyType));
935 udf_release_data(bh);
936 make_bad_inode(inode);
937 return;
939 udf_fill_inode(inode, bh);
940 udf_release_data(bh);
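/*
 * udf_fill_inode
 *
 * Decode the (Extended)FileEntry in bh into the in-core inode: owner,
 * permissions, link count, size, timestamps and allocation type, then pick
 * the inode, file and address-space operations from the ICB file type.
 */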
943 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
945 struct FileEntry *fe;
946 struct ExtendedFileEntry *efe;
947 time_t convtime;
948 long convtime_usec;
949 int offset, alen;
951 fe = (struct FileEntry *)bh->b_data;
952 efe = (struct ExtendedFileEntry *)bh->b_data;
954 if (fe->descTag.tagIdent == TID_EXTENDED_FILE_ENTRY)
955 UDF_I_EXTENDED_FE(inode) = 1;
956 else /* fe->descTag.tagIdent == TID_FILE_ENTRY */
957 UDF_I_EXTENDED_FE(inode) = 0;
959 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
960 UDF_I_STRAT4096(inode) = 0;
961 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
962 UDF_I_STRAT4096(inode) = 1;
964 inode->i_uid = le32_to_cpu(fe->uid);
965 if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
967 inode->i_gid = le32_to_cpu(fe->gid);
968 if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
970 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
971 if (!inode->i_nlink)
972 inode->i_nlink = 1;
974 inode->i_size = le64_to_cpu(fe->informationLength);
975 #if BITS_PER_LONG < 64
976 if (le64_to_cpu(fe->informationLength) & 0xFFFFFFFF00000000ULL)
977 inode->i_size = (Uint32)-1;
978 #endif
980 inode->i_mode = udf_convert_permissions(fe);
981 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
983 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
984 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
986 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICB_FLAG_ALLOC_MASK;
988 if (UDF_I_EXTENDED_FE(inode) == 0)
990 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
991 (inode->i_sb->s_blocksize_bits - 9);
993 if ( udf_stamp_to_time(&convtime, &convtime_usec,
994 lets_to_cpu(fe->modificationTime)) )
996 inode->i_mtime = convtime;
997 UDF_I_UMTIME(inode) = convtime_usec;
998 inode->i_ctime = convtime;
999 UDF_I_UCTIME(inode) = convtime_usec;
1001 else
1003 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1004 UDF_I_UMTIME(inode) = 0;
1005 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1006 UDF_I_UCTIME(inode) = 0;
1009 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1010 lets_to_cpu(fe->accessTime)) )
1012 inode->i_atime = convtime;
1013 UDF_I_UATIME(inode) = convtime_usec;
1015 else
1017 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1018 UDF_I_UATIME(inode) = 0;
1021 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1022 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1023 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1024 offset = sizeof(struct FileEntry) + UDF_I_LENEATTR(inode);
1025 alen = offset + UDF_I_LENALLOC(inode);
1027 else
1029 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1030 (inode->i_sb->s_blocksize_bits - 9);
1032 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1033 lets_to_cpu(efe->modificationTime)) )
1035 inode->i_mtime = convtime;
1036 UDF_I_UMTIME(inode) = convtime_usec;
1038 else
1040 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1041 UDF_I_UMTIME(inode) = 0;
1044 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1045 lets_to_cpu(efe->accessTime)) )
1047 inode->i_atime = convtime;
1048 UDF_I_UATIME(inode) = convtime_usec;
1050 else
1052 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1053 UDF_I_UATIME(inode) = 0;
1056 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1057 lets_to_cpu(efe->createTime)) )
1059 inode->i_ctime = convtime;
1060 UDF_I_UCTIME(inode) = convtime_usec;
1062 else
1064 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1065 UDF_I_UCTIME(inode) = 0;
1068 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1069 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1070 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1071 offset = sizeof(struct ExtendedFileEntry) + UDF_I_LENEATTR(inode);
1072 alen = offset + UDF_I_LENALLOC(inode);
1075 switch (fe->icbTag.fileType)
1077 case FILE_TYPE_DIRECTORY:
1079 inode->i_op = &udf_dir_inode_operations;
1080 inode->i_fop = &udf_dir_operations;
1081 inode->i_mode |= S_IFDIR;
1082 inode->i_nlink ++;
1083 break;
1085 case FILE_TYPE_REGULAR:
1086 case FILE_TYPE_NONE:
1088 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB)
1089 inode->i_data.a_ops = &udf_adinicb_aops;
1090 else
1091 inode->i_data.a_ops = &udf_aops;
1092 inode->i_op = &udf_file_inode_operations;
1093 inode->i_fop = &udf_file_operations;
1094 inode->i_mode |= S_IFREG;
1095 break;
1097 case FILE_TYPE_BLOCK:
1099 inode->i_mode |= S_IFBLK;
1100 break;
1102 case FILE_TYPE_CHAR:
1104 inode->i_mode |= S_IFCHR;
1105 break;
1107 case FILE_TYPE_FIFO:
1109 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1110 break;
1112 case FILE_TYPE_SYMLINK:
1114 inode->i_data.a_ops = &udf_symlink_aops;
1115 inode->i_op = &page_symlink_inode_operations;
1116 inode->i_mode = S_IFLNK|S_IRWXUGO;
1117 break;
1119 default:
1121 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1122 inode->i_ino, fe->icbTag.fileType);
1123 make_bad_inode(inode);
1124 return;
1127 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1129 struct buffer_head *tbh = NULL;
1130 struct DeviceSpecificationExtendedAttr *dsea =
1131 (struct DeviceSpecificationExtendedAttr *)
1132 udf_get_extendedattr(inode, 12, 1, &tbh);
1134 if (dsea)
1136 init_special_inode(inode, inode->i_mode,
1137 ((le32_to_cpu(dsea->majorDeviceIdent)) << 8) |
1138 (le32_to_cpu(dsea->minorDeviceIdent) & 0xFF));
1139 /* Developer ID ??? */
1140 udf_release_data(tbh);
1142 else
1144 make_bad_inode(inode);
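/*
 * udf_convert_permissions
 *
 * UDF stores "other" permissions in the lowest bits with group and owner in
 * successively higher 5-bit groups, so shifting by 2 and 4 lines the rwx
 * triplets up with S_IRWXG and S_IRWXU; setuid, setgid and sticky come from
 * the ICB flags.
 */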
1149 static mode_t
1150 udf_convert_permissions(struct FileEntry *fe)
1152 mode_t mode;
1153 Uint32 permissions;
1154 Uint32 flags;
1156 permissions = le32_to_cpu(fe->permissions);
1157 flags = le16_to_cpu(fe->icbTag.flags);
1159 mode = (( permissions ) & S_IRWXO) |
1160 (( permissions >> 2 ) & S_IRWXG) |
1161 (( permissions >> 4 ) & S_IRWXU) |
1162 (( flags & ICB_FLAG_SETUID) ? S_ISUID : 0) |
1163 (( flags & ICB_FLAG_SETGID) ? S_ISGID : 0) |
1164 (( flags & ICB_FLAG_STICKY) ? S_ISVTX : 0);
1166 return mode;
1170 * udf_write_inode
1172 * PURPOSE
1173 * Write out the specified inode.
1175 * DESCRIPTION
1176 * This routine is called whenever an inode is synced.
1177 * It hands the work off to udf_update_inode() without forcing the write to disk.
1179 * HISTORY
1180 * July 1, 1997 - Andrew E. Mileski
1181 * Written, tested, and released.
1184 void udf_write_inode(struct inode * inode)
1186 udf_update_inode(inode, 0);
1189 int udf_sync_inode(struct inode * inode)
1191 return udf_update_inode(inode, 1);
1194 static int
1195 udf_update_inode(struct inode *inode, int do_sync)
1197 struct buffer_head *bh = NULL;
1198 struct FileEntry *fe;
1199 struct ExtendedFileEntry *efe;
1200 Uint32 udfperms;
1201 Uint16 icbflags;
1202 Uint16 crclen;
1203 int i;
1204 timestamp cpu_time;
1205 int err = 0;
1207 bh = udf_tread(inode->i_sb,
1208 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
1209 inode->i_sb->s_blocksize);
1210 if (!bh)
1212 udf_debug("bread failure\n");
1213 return -EIO;
1215 fe = (struct FileEntry *)bh->b_data;
1216 efe = (struct ExtendedFileEntry *)bh->b_data;
1218 if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
1219 fe->uid = cpu_to_le32(inode->i_uid);
1221 if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
1222 fe->gid = cpu_to_le32(inode->i_gid);
1224 udfperms = ((inode->i_mode & S_IRWXO) ) |
1225 ((inode->i_mode & S_IRWXG) << 2) |
1226 ((inode->i_mode & S_IRWXU) << 4);
1228 udfperms |= (le32_to_cpu(fe->permissions) &
1229 (PERM_O_DELETE | PERM_O_CHATTR |
1230 PERM_G_DELETE | PERM_G_CHATTR |
1231 PERM_U_DELETE | PERM_U_CHATTR));
1232 fe->permissions = cpu_to_le32(udfperms);
1234 if (S_ISDIR(inode->i_mode))
1235 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1236 else
1237 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1240 fe->informationLength = cpu_to_le64(inode->i_size);
1242 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1244 EntityID *eid;
1245 struct buffer_head *tbh = NULL;
1246 struct DeviceSpecificationExtendedAttr *dsea =
1247 (struct DeviceSpecificationExtendedAttr *)
1248 udf_get_extendedattr(inode, 12, 1, &tbh);
1250 if (!dsea)
1252 dsea = (struct DeviceSpecificationExtendedAttr *)
1253 udf_add_extendedattr(inode,
1254 sizeof(struct DeviceSpecificationExtendedAttr) +
1255 sizeof(EntityID), 12, 0x3, &tbh);
1256 dsea->attrType = 12;
1257 dsea->attrSubtype = 1;
1258 dsea->attrLength = sizeof(struct DeviceSpecificationExtendedAttr) +
1259 sizeof(EntityID);
1260 dsea->impUseLength = sizeof(EntityID);
1262 eid = (EntityID *)dsea->impUse;
1263 memset(eid, 0, sizeof(EntityID));
1264 strcpy(eid->ident, UDF_ID_DEVELOPER);
1265 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1266 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1267 dsea->majorDeviceIdent = cpu_to_le32(kdev_t_to_nr(inode->i_rdev) >> 8);
1268 dsea->minorDeviceIdent = cpu_to_le32(kdev_t_to_nr(inode->i_rdev) & 0xFF);
1269 mark_buffer_dirty(tbh, 1);
1270 udf_release_data(tbh);
1273 if (UDF_I_EXTENDED_FE(inode) == 0)
1275 fe->logicalBlocksRecorded = cpu_to_le64(
1276 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1277 (inode->i_sb->s_blocksize_bits - 9));
1279 if (udf_time_to_stamp(&cpu_time, inode->i_atime, UDF_I_UATIME(inode)))
1280 fe->accessTime = cpu_to_lets(cpu_time);
1281 if (udf_time_to_stamp(&cpu_time, inode->i_mtime, UDF_I_UMTIME(inode)))
1282 fe->modificationTime = cpu_to_lets(cpu_time);
1283 memset(&(fe->impIdent), 0, sizeof(EntityID));
1284 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1285 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1286 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1287 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1288 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1289 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1290 fe->descTag.tagIdent = cpu_to_le16(TID_FILE_ENTRY);
1291 crclen = sizeof(struct FileEntry);
1293 else
1295 efe->logicalBlocksRecorded = cpu_to_le64(
1296 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1297 (inode->i_sb->s_blocksize_bits - 9));
1299 if (udf_time_to_stamp(&cpu_time, inode->i_atime, UDF_I_UATIME(inode)))
1300 efe->accessTime = cpu_to_lets(cpu_time);
1301 if (udf_time_to_stamp(&cpu_time, inode->i_mtime, UDF_I_UMTIME(inode)))
1302 efe->modificationTime = cpu_to_lets(cpu_time);
1303 if (udf_time_to_stamp(&cpu_time, inode->i_ctime, UDF_I_UCTIME(inode)))
1304 efe->createTime = cpu_to_lets(cpu_time);
1305 memset(&(efe->impIdent), 0, sizeof(EntityID));
1306 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1307 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1308 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1309 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1310 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1311 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1312 efe->descTag.tagIdent = cpu_to_le16(TID_EXTENDED_FILE_ENTRY);
1313 crclen = sizeof(struct ExtendedFileEntry);
1315 fe->icbTag.strategyType = UDF_I_STRAT4096(inode) ? cpu_to_le16(4096) :
1316 cpu_to_le16(4);
1318 if (S_ISDIR(inode->i_mode))
1319 fe->icbTag.fileType = FILE_TYPE_DIRECTORY;
1320 else if (S_ISREG(inode->i_mode))
1321 fe->icbTag.fileType = FILE_TYPE_REGULAR;
1322 else if (S_ISLNK(inode->i_mode))
1323 fe->icbTag.fileType = FILE_TYPE_SYMLINK;
1324 else if (S_ISBLK(inode->i_mode))
1325 fe->icbTag.fileType = FILE_TYPE_BLOCK;
1326 else if (S_ISCHR(inode->i_mode))
1327 fe->icbTag.fileType = FILE_TYPE_CHAR;
1328 else if (S_ISFIFO(inode->i_mode))
1329 fe->icbTag.fileType = FILE_TYPE_FIFO;
1331 icbflags = UDF_I_ALLOCTYPE(inode) |
1332 ((inode->i_mode & S_ISUID) ? ICB_FLAG_SETUID : 0) |
1333 ((inode->i_mode & S_ISGID) ? ICB_FLAG_SETGID : 0) |
1334 ((inode->i_mode & S_ISVTX) ? ICB_FLAG_STICKY : 0) |
1335 (le16_to_cpu(fe->icbTag.flags) &
1336 ~(ICB_FLAG_ALLOC_MASK | ICB_FLAG_SETUID |
1337 ICB_FLAG_SETGID | ICB_FLAG_STICKY));
1339 fe->icbTag.flags = cpu_to_le16(icbflags);
1340 fe->descTag.descVersion = cpu_to_le16(2);
1341 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1342 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1343 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1344 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1345 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1347 fe->descTag.tagChecksum = 0;
1348 for (i=0; i<16; i++)
1349 if (i != 4)
1350 fe->descTag.tagChecksum += ((Uint8 *)&(fe->descTag))[i];
1352 /* write the data blocks */
1353 mark_buffer_dirty(bh, 1);
1354 if (do_sync)
1356 ll_rw_block(WRITE, 1, &bh);
1357 wait_on_buffer(bh);
1358 if (buffer_req(bh) && !buffer_uptodate(bh))
1360 printk(KERN_ERR "IO error syncing udf inode [%s:%08lx]\n",
1361 bdevname(inode->i_dev), inode->i_ino);
1362 err = -EIO;
1365 udf_release_data(bh);
1366 return err;
1370 * udf_iget
1372 * PURPOSE
1373 * Get an inode.
1375 * DESCRIPTION
1376 * This routine replaces iget() and read_inode().
1378 * HISTORY
1379 * October 3, 1997 - Andrew E. Mileski
1380 * Written, tested, and released.
1382 * 12/19/98 dgb Added semaphore and changed to be a wrapper of iget
1384 struct inode *
1385 udf_iget(struct super_block *sb, lb_addr ino)
1387 struct inode *inode;
1388 unsigned long block;
1390 block = udf_get_lb_pblock(sb, ino, 0);
1392 /* Get the inode */
1394 inode = iget(sb, block);
1395 /* calls udf_read_inode() ! */
1397 if (!inode)
1399 printk(KERN_ERR "udf: iget() failed\n");
1400 return NULL;
1402 else if (is_bad_inode(inode))
1404 iput(inode);
1405 return NULL;
1407 else if (UDF_I_LOCATION(inode).logicalBlockNum == 0xFFFFFFFF &&
1408 UDF_I_LOCATION(inode).partitionReferenceNum == 0xFFFF)
1410 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(lb_addr));
1411 __udf_read_inode(inode);
1414 if ( ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum) )
1416 udf_debug("block=%d, partition=%d out of range\n",
1417 ino.logicalBlockNum, ino.partitionReferenceNum);
1418 return NULL;
1421 return inode;
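/*
 * udf_add_aext
 *
 * Append an allocation descriptor for (eloc, elen) at *extoffset.  When the
 * current descriptor area is full, a new AllocExtDesc block is allocated,
 * chained in with an EXTENT_NEXT_EXTENT_ALLOCDECS entry, and *bloc,
 * *extoffset and *bh are moved into it.
 */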
1424 int udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1425 lb_addr eloc, Uint32 elen, struct buffer_head **bh, int inc)
1427 int adsize;
1428 short_ad *sad = NULL;
1429 long_ad *lad = NULL;
1430 struct AllocExtDesc *aed;
1431 int ret;
1433 if (!(*bh))
1435 if (!(*bh = udf_tread(inode->i_sb,
1436 udf_get_lb_pblock(inode->i_sb, *bloc, 0),
1437 inode->i_sb->s_blocksize)))
1439 udf_debug("reading block %d failed!\n",
1440 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1441 return -1;
1445 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
1446 adsize = sizeof(short_ad);
1447 else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
1448 adsize = sizeof(long_ad);
1449 else
1450 return -1;
1452 if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
1454 char *sptr, *dptr;
1455 struct buffer_head *nbh;
1456 int err, loffset;
1457 Uint32 lblock = bloc->logicalBlockNum;
1458 Uint16 lpart = bloc->partitionReferenceNum;
1460 if (!(bloc->logicalBlockNum = udf_new_block(inode,
1461 lpart, lblock, &err)))
1463 return -1;
1465 if (!(nbh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1466 *bloc, 0), inode->i_sb->s_blocksize)))
1468 return -1;
1470 aed = (struct AllocExtDesc *)(nbh->b_data);
1471 aed->previousAllocExtLocation = cpu_to_le32(lblock);
1472 if (*extoffset + adsize > inode->i_sb->s_blocksize)
1474 loffset = *extoffset;
1475 aed->lengthAllocDescs = cpu_to_le32(adsize);
1476 sptr = (*bh)->b_data + *extoffset - adsize;
1477 dptr = nbh->b_data + sizeof(struct AllocExtDesc);
1478 memcpy(dptr, sptr, adsize);
1479 *extoffset = sizeof(struct AllocExtDesc) + adsize;
1481 else
1483 loffset = *extoffset + adsize;
1484 aed->lengthAllocDescs = cpu_to_le32(0);
1485 sptr = (*bh)->b_data + *extoffset;
1486 *extoffset = sizeof(struct AllocExtDesc);
1488 if (UDF_I_LOCATION(inode).logicalBlockNum == lblock)
1489 UDF_I_LENALLOC(inode) += adsize;
1490 else
1492 aed = (struct AllocExtDesc *)(*bh)->b_data;
1493 aed->lengthAllocDescs =
1494 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1497 udf_new_tag(nbh->b_data, TID_ALLOC_EXTENT_DESC, 2, 1,
1498 bloc->logicalBlockNum, sizeof(tag));
1499 switch (UDF_I_ALLOCTYPE(inode))
1501 case ICB_FLAG_AD_SHORT:
1503 sad = (short_ad *)sptr;
1504 sad->extLength = cpu_to_le32(EXTENT_NEXT_EXTENT_ALLOCDECS << 30 |
1505 inode->i_sb->s_blocksize);
1506 sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
1507 break;
1509 case ICB_FLAG_AD_LONG:
1511 lad = (long_ad *)sptr;
1512 lad->extLength = cpu_to_le32(EXTENT_NEXT_EXTENT_ALLOCDECS << 30 |
1513 inode->i_sb->s_blocksize);
1514 lad->extLocation = cpu_to_lelb(*bloc);
1515 break;
1518 udf_update_tag((*bh)->b_data, loffset);
1519 mark_buffer_dirty(*bh, 1);
1520 udf_release_data(*bh);
1521 *bh = nbh;
1524 ret = udf_write_aext(inode, *bloc, extoffset, eloc, elen, bh, inc);
1526 if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1528 UDF_I_LENALLOC(inode) += adsize;
1529 mark_inode_dirty(inode);
1531 else
1533 aed = (struct AllocExtDesc *)(*bh)->b_data;
1534 aed->lengthAllocDescs =
1535 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1536 udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
1537 mark_buffer_dirty(*bh, 1);
1540 return ret;
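/*
 * udf_write_aext
 *
 * Overwrite the short_ad/long_ad at *extoffset with (eloc, elen) and mark
 * the containing buffer dirty; the return value is the extent type taken
 * from the top two bits of elen.
 */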
1543 int udf_write_aext(struct inode *inode, lb_addr bloc, int *extoffset,
1544 lb_addr eloc, Uint32 elen, struct buffer_head **bh, int inc)
1546 int adsize;
1547 short_ad *sad = NULL;
1548 long_ad *lad = NULL;
1550 if (!(*bh))
1552 if (!(*bh = udf_tread(inode->i_sb,
1553 udf_get_lb_pblock(inode->i_sb, bloc, 0),
1554 inode->i_sb->s_blocksize)))
1556 udf_debug("reading block %d failed!\n",
1557 udf_get_lb_pblock(inode->i_sb, bloc, 0));
1558 return -1;
1562 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
1563 adsize = sizeof(short_ad);
1564 else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
1565 adsize = sizeof(long_ad);
1566 else
1567 return -1;
1569 switch (UDF_I_ALLOCTYPE(inode))
1571 case ICB_FLAG_AD_SHORT:
1573 sad = (short_ad *)((*bh)->b_data + *extoffset);
1574 sad->extLength = cpu_to_le32(elen);
1575 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1576 break;
1578 case ICB_FLAG_AD_LONG:
1580 lad = (long_ad *)((*bh)->b_data + *extoffset);
1581 lad->extLength = cpu_to_le32(elen);
1582 lad->extLocation = cpu_to_lelb(eloc);
1583 break;
1587 if (memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
1589 struct AllocExtDesc *aed = (struct AllocExtDesc *)(*bh)->b_data;
1590 udf_update_tag((*bh)->b_data,
1591 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct AllocExtDesc));
1594 mark_buffer_dirty(*bh, 1);
1596 if (inc)
1597 *extoffset += adsize;
1598 return (elen >> 30);
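/*
 * udf_next_aext
 *
 * Return the type of the next allocation extent, advancing *extoffset when
 * inc is set and transparently following EXTENT_NEXT_EXTENT_ALLOCDECS
 * chains into continuation blocks; -1 means there is no further extent.
 */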
1601 int udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1602 lb_addr *eloc, Uint32 *elen, struct buffer_head **bh, int inc)
1604 int pos, alen;
1605 Uint8 etype;
1607 if (!(*bh))
1609 if (!(*bh = udf_tread(inode->i_sb,
1610 udf_get_lb_pblock(inode->i_sb, *bloc, 0),
1611 inode->i_sb->s_blocksize)))
1613 udf_debug("reading block %d failed!\n",
1614 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1615 return -1;
1619 if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1621 pos = udf_file_entry_alloc_offset(inode);
1622 alen = UDF_I_LENALLOC(inode) + pos;
1624 else
1626 struct AllocExtDesc *aed = (struct AllocExtDesc *)(*bh)->b_data;
1628 pos = sizeof(struct AllocExtDesc);
1629 alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
1632 if (!(*extoffset))
1633 *extoffset = pos;
1635 switch (UDF_I_ALLOCTYPE(inode))
1637 case ICB_FLAG_AD_SHORT:
1639 short_ad *sad;
1641 if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
1642 return -1;
1644 if ((etype = le32_to_cpu(sad->extLength) >> 30) == EXTENT_NEXT_EXTENT_ALLOCDECS)
1646 bloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1647 *extoffset = 0;
1648 udf_release_data(*bh);
1649 *bh = NULL;
1650 return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
1652 else
1654 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1655 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1656 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1658 break;
1660 case ICB_FLAG_AD_LONG:
1662 long_ad *lad;
1664 if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
1665 return -1;
1667 if ((etype = le32_to_cpu(lad->extLength) >> 30) == EXTENT_NEXT_EXTENT_ALLOCDECS)
1669 *bloc = lelb_to_cpu(lad->extLocation);
1670 *extoffset = 0;
1671 udf_release_data(*bh);
1672 *bh = NULL;
1673 return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
1675 else
1677 *eloc = lelb_to_cpu(lad->extLocation);
1678 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1680 break;
1682 case ICB_FLAG_AD_IN_ICB:
1684 *bloc = *eloc = UDF_I_LOCATION(inode);
1685 *elen = UDF_I_LENALLOC(inode);
1686 *extoffset = udf_file_entry_alloc_offset(inode);
1687 etype = EXTENT_RECORDED_ALLOCATED;
1688 break;
1690 default:
1692 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1693 return -1;
1696 if (*elen)
1697 return etype;
1699 udf_debug("Empty Extent, inode=%ld, alloctype=%d, elen=%d, etype=%d, extoffset=%d\n",
1700 inode->i_ino, UDF_I_ALLOCTYPE(inode), *elen, etype, *extoffset);
1701 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
1702 *extoffset -= sizeof(short_ad);
1703 else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
1704 *extoffset -= sizeof(long_ad);
1705 return -1;
1708 int udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1709 lb_addr *eloc, Uint32 *elen, struct buffer_head **bh, int inc)
1711 int pos, alen;
1712 Uint8 etype;
1714 if (!(*bh))
1716 if (!(*bh = udf_tread(inode->i_sb,
1717 udf_get_lb_pblock(inode->i_sb, *bloc, 0),
1718 inode->i_sb->s_blocksize)))
1720 udf_debug("reading block %d failed!\n",
1721 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1722 return -1;
1726 if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1728 if (!(UDF_I_EXTENDED_FE(inode)))
1729 pos = sizeof(struct FileEntry) + UDF_I_LENEATTR(inode);
1730 else
1731 pos = sizeof(struct ExtendedFileEntry) + UDF_I_LENEATTR(inode);
1732 alen = UDF_I_LENALLOC(inode) + pos;
1734 else
1736 struct AllocExtDesc *aed = (struct AllocExtDesc *)(*bh)->b_data;
1738 pos = sizeof(struct AllocExtDesc);
1739 alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
1742 if (!(*extoffset))
1743 *extoffset = pos;
1745 switch (UDF_I_ALLOCTYPE(inode))
1747 case ICB_FLAG_AD_SHORT:
1749 short_ad *sad;
1751 if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
1752 return -1;
1754 etype = le32_to_cpu(sad->extLength) >> 30;
1755 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1756 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1757 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1758 break;
1760 case ICB_FLAG_AD_LONG:
1762 long_ad *lad;
1764 if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
1765 return -1;
1767 etype = le32_to_cpu(lad->extLength) >> 30;
1768 *eloc = lelb_to_cpu(lad->extLocation);
1769 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1770 break;
1772 default:
1774 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1775 return -1;
1778 if (*elen)
1779 return etype;
1781 udf_debug("Empty Extent!\n");
1782 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
1783 *extoffset -= sizeof(short_ad);
1784 else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
1785 *extoffset -= sizeof(long_ad);
1786 return -1;
1789 int udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
1790 lb_addr neloc, Uint32 nelen, struct buffer_head *bh)
1792 lb_addr oeloc;
1793 Uint32 oelen;
1794 int type;
1796 if (!bh)
1798 if (!(bh = udf_tread(inode->i_sb,
1799 udf_get_lb_pblock(inode->i_sb, bloc, 0),
1800 inode->i_sb->s_blocksize)))
1802 udf_debug("reading block %d failed!\n",
1803 udf_get_lb_pblock(inode->i_sb, bloc, 0));
1804 return -1;
1807 else
1808 atomic_inc(&bh->b_count);
1810 while ((type = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1812 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, &bh, 1);
1814 neloc = oeloc;
1815 nelen = (type << 30) | oelen;
1817 udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1818 udf_release_data(bh);
1819 return (nelen >> 30);
1822 int udf_delete_aext(struct inode *inode, lb_addr nbloc, int nextoffset,
1823 lb_addr eloc, Uint32 elen, struct buffer_head *nbh)
1825 struct buffer_head *obh;
1826 lb_addr obloc;
1827 int oextoffset, adsize;
1828 char type;
1829 struct AllocExtDesc *aed;
1831 if (!(nbh))
1833 if (!(nbh = udf_tread(inode->i_sb,
1834 udf_get_lb_pblock(inode->i_sb, nbloc, 0),
1835 inode->i_sb->s_blocksize)))
1837 udf_debug("reading block %d failed!\n",
1838 udf_get_lb_pblock(inode->i_sb, nbloc, 0));
1839 return -1;
1842 else
1843 atomic_inc(&nbh->b_count);
1844 atomic_inc(&nbh->b_count);
1846 if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_SHORT)
1847 adsize = sizeof(short_ad);
1848 else if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_LONG)
1849 adsize = sizeof(long_ad);
1850 else
1851 adsize = 0;
1853 obh = nbh;
1854 obloc = nbloc;
1855 oextoffset = nextoffset;
1857 if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
1858 return -1;
1860 while ((type = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
1862 udf_write_aext(inode, obloc, &oextoffset, eloc, (type << 30) | elen, &obh, 1);
1863 if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
1865 obloc = nbloc;
1866 udf_release_data(obh);
1867 atomic_inc(&nbh->b_count);
1868 obh = nbh;
1869 oextoffset = nextoffset - adsize;
1872 memset(&eloc, 0x00, sizeof(lb_addr));
1873 elen = 0;
1875 if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
1877 udf_free_blocks(inode, nbloc, 0, 1);
1878 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, &obh, 1);
1879 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, &obh, 1);
1880 if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
1882 UDF_I_LENALLOC(inode) -= (adsize * 2);
1883 mark_inode_dirty(inode);
1885 else
1887 aed = (struct AllocExtDesc *)(obh)->b_data;
1888 aed->lengthAllocDescs =
1889 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
1890 udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
1891 mark_buffer_dirty(obh, 1);
1894 else
1896 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, &obh, 1);
1897 if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
1899 UDF_I_LENALLOC(inode) -= adsize;
1900 mark_inode_dirty(inode);
1902 else
1904 aed = (struct AllocExtDesc *)(obh)->b_data;
1905 aed->lengthAllocDescs =
1906 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
1907 udf_update_tag((obh)->b_data, oextoffset - adsize);
1908 mark_buffer_dirty(obh, 1);
1912 udf_release_data(nbh);
1913 udf_release_data(obh);
1914 return (elen >> 30);
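/*
 * inode_bmap
 *
 * Walk the extent list until the extent containing the given file block is
 * found; returns its type and fills in the extent location and length plus
 * the block offset within that extent.
 */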
1917 int inode_bmap(struct inode *inode, int block, lb_addr *bloc, Uint32 *extoffset,
1918 lb_addr *eloc, Uint32 *elen, Uint32 *offset, struct buffer_head **bh)
1920 int etype, lbcount = 0, b_off;
1922 if (block < 0)
1924 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1925 return -1;
1927 if (!inode)
1929 printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
1930 return -1;
1933 *extoffset = udf_file_entry_alloc_offset(inode);
1934 *elen = 0;
1935 b_off = block << inode->i_sb->s_blocksize_bits;
1936 *bloc = UDF_I_LOCATION(inode);
1940 lbcount += *elen;
1942 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
1944 *offset = (b_off - lbcount) >> inode->i_sb->s_blocksize_bits;
1945 return -1;
1947 } while (lbcount + *elen <= b_off);
1949 *offset = (b_off - lbcount) >> inode->i_sb->s_blocksize_bits;
1951 return etype;
1954 long udf_locked_block_map(struct inode *inode, long block)
1956 lb_addr eloc, bloc;
1957 Uint32 offset, extoffset, elen;
1958 struct buffer_head *bh = NULL;
1959 int ret;
1961 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == EXTENT_RECORDED_ALLOCATED)
1962 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
1963 else
1964 ret = 0;
1966 if (bh)
1967 udf_release_data(bh);
1969 if (UDF_SB(inode->i_sb)->s_flags & UDF_FLAG_VARCONV)
1970 return udf_fixed_to_variable(ret);
1971 else
1972 return ret;
1975 long udf_block_map(struct inode *inode, long block)
1977 int ret;
1979 lock_kernel();
1980 ret = udf_locked_block_map(inode, block);
1981 unlock_kernel();
1982 return ret;