[PATCH] FUSE: bump interface minor version
[linux-2.6.git] / fs / udf / inode.c
blobb83890beaaacbec45e199c9cf8fc78599423ff07
1 /*
2 * inode.c
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
7 * CONTACTS
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hpesjro.fc.hp.com
12 * COPYRIGHT
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2004 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
22 * HISTORY
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
36 #include "udfdecl.h"
37 #include <linux/mm.h>
38 #include <linux/smp_lock.h>
39 #include <linux/module.h>
40 #include <linux/pagemap.h>
41 #include <linux/buffer_head.h>
42 #include <linux/writeback.h>
43 #include <linux/slab.h>
45 #include "udf_i.h"
46 #include "udf_sb.h"
48 MODULE_AUTHOR("Ben Fennema");
49 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
50 MODULE_LICENSE("GPL");
52 #define EXTENT_MERGE_SIZE 5
54 static mode_t udf_convert_permissions(struct fileEntry *);
55 static int udf_update_inode(struct inode *, int);
56 static void udf_fill_inode(struct inode *, struct buffer_head *);
57 static struct buffer_head *inode_getblk(struct inode *, long, int *,
58 long *, int *);
59 static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
60 kernel_lb_addr, uint32_t, struct buffer_head *);
61 static void udf_split_extents(struct inode *, int *, int, int,
62 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
63 static void udf_prealloc_extents(struct inode *, int, int,
64 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
65 static void udf_merge_extents(struct inode *,
66 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
67 static void udf_update_extents(struct inode *,
68 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
69 kernel_lb_addr, uint32_t, struct buffer_head **);
70 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
73 * udf_delete_inode
75 * PURPOSE
76 * Clean-up before the specified inode is destroyed.
78 * DESCRIPTION
79 * This routine is called when the kernel destroys an inode structure
80 * ie. when iput() finds i_count == 0.
82 * HISTORY
83 * July 1, 1997 - Andrew E. Mileski
84 * Written, tested, and released.
86 * Called at the last iput() if i_nlink is zero.
88 void udf_delete_inode(struct inode * inode)
90 	truncate_inode_pages(&inode->i_data, 0);
/* A bad inode has no trustworthy on-disk state; skip the update/free
 * path and just clear the in-core inode. */
92 	if (is_bad_inode(inode))
93 	goto no_delete;
/* Shrink the file to nothing, then (under the BKL) write the emptied
 * inode back and return its block to the allocator. */
95 	inode->i_size = 0;
96 	udf_truncate(inode);
97 	lock_kernel();
99 	udf_update_inode(inode, IS_SYNC(inode));
100 	udf_free_inode(inode);
102 	unlock_kernel();
103 	return;
104 	no_delete:
105 	clear_inode(inode);
/*
 * udf_clear_inode
 *
 * Called when the inode is dropped from memory: on writable mounts,
 * give back any blocks that were preallocated for this inode, then
 * free the in-core copy of its allocation descriptors / extended
 * attribute data (UDF_I_DATA).
 */
108 void udf_clear_inode(struct inode *inode)
110 	if (!(inode->i_sb->s_flags & MS_RDONLY)) {
111 	lock_kernel();
112 	udf_discard_prealloc(inode);
113 	unlock_kernel();
/* kfree(NULL) is harmless; NULLing the pointer guards against
 * use-after-free if the inode object is recycled. */
116 	kfree(UDF_I_DATA(inode));
117 	UDF_I_DATA(inode) = NULL;
/* Write one page of a file: delegate to the generic buffer-head path,
 * using udf_get_block to map file blocks to disk blocks. */
120 static int udf_writepage(struct page *page, struct writeback_control *wbc)
122 	return block_write_full_page(page, udf_get_block, wbc);
/* Read one page of a file via the generic buffer-head read path. */
125 static int udf_readpage(struct file *file, struct page *page)
127 	return block_read_full_page(page, udf_get_block);
/* Prepare a partial-page write: map (and read in, if needed) the
 * buffers covering [from, to) before the caller copies in new data. */
130 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
132 	return block_prepare_write(page, from, to, udf_get_block);
/* FIBMAP support: translate a file block number to a disk sector. */
135 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
137 	return generic_block_bmap(mapping,block,udf_get_block);
/* Address-space operations for regular (extent-mapped) UDF files.
 * Files whose data lives inside the ICB use udf_adinicb_aops instead
 * until udf_expand_file_adinicb() converts them to this set. */
140 struct address_space_operations udf_aops = {
141 	.readpage = udf_readpage,
142 	.writepage = udf_writepage,
143 	.sync_page = block_sync_page,
144 	.prepare_write = udf_prepare_write,
145 	.commit_write = generic_commit_write,
146 	.bmap = udf_bmap,
/*
 * udf_expand_file_adinicb
 *
 * Convert a file whose data is stored inside the ICB (allocation type
 * AD_IN_ICB) into a normal extent-mapped file: copy the in-ICB data
 * into page 0 of the page cache, switch the inode to short/long
 * allocation descriptors, and write the page out so the data lands in
 * a real block.  *err is only set by the writepage path.
 */
149 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
151 	struct page *page;
152 	char *kaddr;
153 	struct writeback_control udf_wbc = {
154 	.sync_mode = WB_SYNC_NONE,
155 	.nr_to_write = 1,
158 	/* from now on we have normal address_space methods */
159 	inode->i_data.a_ops = &udf_aops;
/* No allocation data at all: just flip the allocation type and done. */
161 	if (!UDF_I_LENALLOC(inode))
163 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
164 	UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
165 	else
166 	UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
167 	mark_inode_dirty(inode);
168 	return;
/* Copy the in-ICB payload into page 0, zero-filling the remainder. */
171 	page = grab_cache_page(inode->i_mapping, 0);
172 	BUG_ON(!PageLocked(page));
174 	if (!PageUptodate(page))
176 	kaddr = kmap(page);
177 	memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
178 	PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
179 	memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
180 	UDF_I_LENALLOC(inode));
181 	flush_dcache_page(page);
182 	SetPageUptodate(page);
183 	kunmap(page);
/* Wipe the old in-ICB copy and switch to extent-based allocation. */
185 	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
186 	UDF_I_LENALLOC(inode));
187 	UDF_I_LENALLOC(inode) = 0;
188 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
189 	UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
190 	else
191 	UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
/* Push the page to disk; this allocates the backing block via
 * udf_get_block in the writepage path. */
193 	inode->i_data.a_ops->writepage(page, &udf_wbc);
194 	page_cache_release(page);
196 	mark_inode_dirty(inode);
/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory stored inside its ICB into a one-block,
 * extent-mapped directory: allocate a fresh block, replay every file
 * identifier from the in-ICB area into it (rewriting each entry's tag
 * location), then record the block as the directory's single extent.
 *
 * Returns the buffer_head of the new directory block (caller releases),
 * with *block set to its logical block number; returns NULL on failure
 * or when the directory is empty (only the alloc type is switched).
 */
199 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
201 	int newblock;
202 	struct buffer_head *sbh = NULL, *dbh = NULL;
203 	kernel_lb_addr bloc, eloc;
204 	uint32_t elen, extoffset;
205 	uint8_t alloctype;
207 	struct udf_fileident_bh sfibh, dfibh;
208 	loff_t f_pos = udf_ext0_offset(inode) >> 2;
209 	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
210 	struct fileIdentDesc cfi, *sfi, *dfi;
212 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
213 	alloctype = ICBTAG_FLAG_AD_SHORT;
214 	else
215 	alloctype = ICBTAG_FLAG_AD_LONG;
/* Empty directory: nothing to copy, just switch allocation type. */
217 	if (!inode->i_size)
219 	UDF_I_ALLOCTYPE(inode) = alloctype;
220 	mark_inode_dirty(inode);
221 	return NULL;
224 	/* alloc block, and copy data to it */
225 	*block = udf_new_block(inode->i_sb, inode,
226 	UDF_I_LOCATION(inode).partitionReferenceNum,
227 	UDF_I_LOCATION(inode).logicalBlockNum, err);
229 	if (!(*block))
230 	return NULL;
231 	newblock = udf_get_pblock(inode->i_sb, *block,
232 	UDF_I_LOCATION(inode).partitionReferenceNum, 0);
233 	if (!newblock)
234 	return NULL;
235 	dbh = udf_tgetblk(inode->i_sb, newblock);
236 	if (!dbh)
237 	return NULL;
238 	lock_buffer(dbh);
239 	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
240 	set_buffer_uptodate(dbh);
241 	unlock_buffer(dbh);
242 	mark_buffer_dirty_inode(dbh, inode);
/* Walk the in-ICB directory entries (read with alloc type temporarily
 * set back to AD_IN_ICB) and rewrite each into the new block. */
244 	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
245 	sbh = sfibh.sbh = sfibh.ebh = NULL;
246 	dfibh.soffset = dfibh.eoffset = 0;
247 	dfibh.sbh = dfibh.ebh = dbh;
248 	while ( (f_pos < size) )
250 	UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
251 	sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
252 	if (!sfi)
254 	udf_release_data(dbh);
255 	return NULL;
257 	UDF_I_ALLOCTYPE(inode) = alloctype;
/* Entries now live in the new block, so retag them with its address. */
258 	sfi->descTag.tagLocation = cpu_to_le32(*block);
259 	dfibh.soffset = dfibh.eoffset;
260 	dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
261 	dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
262 	if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
263 	sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
/* Write failed: restore in-ICB state and give up. */
265 	UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
266 	udf_release_data(dbh);
267 	return NULL;
270 	mark_buffer_dirty_inode(dbh, inode);
/* Clear the old in-ICB data and record the new block as the
 * directory's single allocated extent. */
272 	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
273 	UDF_I_LENALLOC(inode) = 0;
274 	bloc = UDF_I_LOCATION(inode);
275 	eloc.logicalBlockNum = *block;
276 	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
277 	elen = inode->i_size;
278 	UDF_I_LENEXTENTS(inode) = elen;
279 	extoffset = udf_file_entry_alloc_offset(inode);
280 	udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
281 	/* UniqueID stuff */
283 	udf_release_data(sbh);
284 	mark_inode_dirty(inode);
285 	return dbh;
/*
 * udf_get_block
 *
 * get_block callback for the generic buffer-head I/O paths: map file
 * block 'block' of 'inode' to a disk block in bh_result.  With
 * create == 0 this is a pure lookup; with create != 0 a missing block
 * is allocated via inode_getblk() (under the BKL) and bh_result is
 * marked new.  Returns 0 on success or a negative error.
 */
288 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
290 	int err, new;
291 	struct buffer_head *bh;
292 	unsigned long phys;
/* Read-only lookup needs no locking or allocation. */
294 	if (!create)
296 	phys = udf_block_map(inode, block);
297 	if (phys)
298 	map_bh(bh_result, inode->i_sb, phys);
299 	return 0;
302 	err = -EIO;
303 	new = 0;
304 	bh = NULL;
306 	lock_kernel();
308 	if (block < 0)
309 	goto abort_negative;
/* Sequential write detection: advance the next-alloc hints so
 * inode_getblk() allocates contiguously. */
311 	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
313 	UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
314 	UDF_I_NEXT_ALLOC_GOAL(inode) ++;
317 	err = 0;
319 	bh = inode_getblk(inode, block, &err, &phys, &new);
/* inode_getblk() reports the mapping via *phys and never hands back a
 * buffer_head; these BUG()s assert that invariant. */
320 	if (bh)
321 	BUG();
322 	if (err)
323 	goto abort;
324 	if (!phys)
325 	BUG();
327 	if (new)
328 	set_buffer_new(bh_result);
329 	map_bh(bh_result, inode->i_sb, phys);
330 	abort:
331 	unlock_kernel();
332 	return err;
334 	abort_negative:
335 	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
336 	goto abort;
339 static struct buffer_head *
340 udf_getblk(struct inode *inode, long block, int create, int *err)
342 struct buffer_head dummy;
344 dummy.b_state = 0;
345 dummy.b_blocknr = -1000;
346 *err = udf_get_block(inode, block, &dummy, create);
347 if (!*err && buffer_mapped(&dummy))
349 struct buffer_head *bh;
350 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
351 if (buffer_new(&dummy))
353 lock_buffer(bh);
354 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
355 set_buffer_uptodate(bh);
356 unlock_buffer(bh);
357 mark_buffer_dirty_inode(bh, inode);
359 return bh;
361 return NULL;
/*
 * inode_getblk
 *
 * Core block-allocation routine: find or create the on-disk block
 * backing file block 'block'.  Walks the inode's extent list keeping a
 * previous/current/next window (pbloc/cbloc/nbloc and the matching
 * buffer_heads), then either returns an already-recorded block, or
 * splits/preallocates/merges extents around a newly allocated block
 * and writes the updated extent list back.
 *
 * On success *phys holds the physical block, *new is set when the
 * block was freshly allocated, and the return value is always NULL
 * (the mapping travels through *phys, not a buffer_head).
 */
364 static struct buffer_head * inode_getblk(struct inode * inode, long block,
365 	int *err, long *phys, int *new)
367 	struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
368 	kernel_long_ad laarr[EXTENT_MERGE_SIZE];
369 	uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
370 	int count = 0, startnum = 0, endnum = 0;
371 	uint32_t elen = 0;
372 	kernel_lb_addr eloc, pbloc, cbloc, nbloc;
373 	int c = 1;
374 	uint64_t lbcount = 0, b_off = 0;
375 	uint32_t newblocknum, newblock, offset = 0;
376 	int8_t etype;
377 	int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
378 	char lastblock = 0;
380 	pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
381 	b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
382 	pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
384 	/* find the extent which contains the block we are looking for.
385 	alternate between laarr[0] and laarr[1] for locations of the
386 	current extent, and the previous extent */
389 	if (pbh != cbh)
391 	udf_release_data(pbh);
392 	atomic_inc(&cbh->b_count);
393 	pbh = cbh;
395 	if (cbh != nbh)
397 	udf_release_data(cbh);
398 	atomic_inc(&nbh->b_count);
399 	cbh = nbh;
402 	lbcount += elen;
404 	pbloc = cbloc;
405 	cbloc = nbloc;
407 	pextoffset = cextoffset;
408 	cextoffset = nextoffset;
410 	if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
411 	break;
413 	c = !c;
415 	laarr[c].extLength = (etype << 30) | elen;
416 	laarr[c].extLocation = eloc;
/* Track the end of the last materialized extent as the allocation
 * goal, to keep new blocks close to existing data. */
418 	if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
419 	pgoal = eloc.logicalBlockNum +
420 	((elen + inode->i_sb->s_blocksize - 1) >>
421 	inode->i_sb->s_blocksize_bits);
423 	count ++;
424 	} while (lbcount + elen <= b_off);
/* offset = index of the wanted block within the found extent. */
426 	b_off -= lbcount;
427 	offset = b_off >> inode->i_sb->s_blocksize_bits;
429 	/* if the extent is allocated and recorded, return the block
430 	if the extent is not a multiple of the blocksize, round up */
432 	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
434 	if (elen & (inode->i_sb->s_blocksize - 1))
436 	elen = EXT_RECORDED_ALLOCATED |
437 	((elen + inode->i_sb->s_blocksize - 1) &
438 	~(inode->i_sb->s_blocksize - 1));
439 	etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
441 	udf_release_data(pbh);
442 	udf_release_data(cbh);
443 	udf_release_data(nbh);
444 	newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
445 	*phys = newblock;
446 	return NULL;
/* Past EOF: extend the file with a not-recorded/not-allocated extent
 * covering the requested block. */
449 	if (etype == -1)
451 	endnum = startnum = ((count > 1) ? 1 : count);
452 	if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
454 	laarr[c].extLength =
455 	(laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
456 	(((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
457 	inode->i_sb->s_blocksize - 1) &
458 	~(inode->i_sb->s_blocksize - 1));
459 	UDF_I_LENEXTENTS(inode) =
460 	(UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
461 	~(inode->i_sb->s_blocksize - 1);
463 	c = !c;
464 	laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
465 	((offset + 1) << inode->i_sb->s_blocksize_bits);
466 	memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
467 	count ++;
468 	endnum ++;
469 	lastblock = 1;
471 	else
472 	endnum = startnum = ((count > 2) ? 2 : count);
474 	/* if the current extent is in position 0, swap it with the previous */
475 	if (!c && count != 1)
477 	laarr[2] = laarr[0];
478 	laarr[0] = laarr[1];
479 	laarr[1] = laarr[2];
480 	c = 1;
483 	/* if the current block is located in a extent, read the next extent */
484 	if (etype != -1)
486 	if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
488 	laarr[c+1].extLength = (etype << 30) | elen;
489 	laarr[c+1].extLocation = eloc;
490 	count ++;
491 	startnum ++;
492 	endnum ++;
494 	else
495 	lastblock = 1;
497 	udf_release_data(cbh);
498 	udf_release_data(nbh);
500 	/* if the current extent is not recorded but allocated, get the
501 	block in the extent corresponding to the requested block */
502 	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
503 	newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
504 	else /* otherwise, allocate a new block */
/* Allocation goal: the next-alloc hint if this continues a sequential
 * write, else the end of the previous extent, else just past the ICB. */
506 	if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
507 	goal = UDF_I_NEXT_ALLOC_GOAL(inode);
509 	if (!goal)
511 	if (!(goal = pgoal))
512 	goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
515 	if (!(newblocknum = udf_new_block(inode->i_sb, inode,
516 	UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
518 	udf_release_data(pbh);
519 	*err = -ENOSPC;
520 	return NULL;
522 	UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
525 	/* if the extent the requsted block is located in contains multiple blocks,
526 	split the extent into at most three extents. blocks prior to requested
527 	block, requested block, and blocks after requested block */
528 	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
530 	#ifdef UDF_PREALLOCATE
531 	/* preallocate blocks */
532 	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
533 	#endif
535 	/* merge any continuous blocks in laarr */
536 	udf_merge_extents(inode, laarr, &endnum);
538 	/* write back the new extents, inserting new extents if the new number
539 	of extents is greater than the old number, and deleting extents if
540 	the new number of extents is less than the old number */
541 	udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
543 	udf_release_data(pbh);
545 	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
546 	UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
548 	return NULL;
550 	*phys = newblock;
551 	*err = 0;
552 	*new = 1;
/* Remember where we allocated so a following sequential write can
 * continue contiguously. */
553 	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
554 	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
555 	inode->i_ctime = current_fs_time(inode->i_sb);
557 	if (IS_SYNC(inode))
558 	udf_sync_inode(inode);
559 	else
560 	mark_inode_dirty(inode);
/* result is never set above, so this always returns NULL; callers get
 * the mapping via *phys. */
561 	return result;
/*
 * udf_split_extents
 *
 * Split the (not-recorded) extent laarr[*c] containing the requested
 * block into up to three pieces: blocks before the requested one,
 * the requested block itself (now recorded + allocated at
 * newblocknum), and blocks after it.  *c is advanced to index the
 * middle piece and *endnum grows by the number of pieces added.
 */
564 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
565 	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
567 	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
568 	(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
570 	int curr = *c;
571 	int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
572 	inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
573 	int8_t etype = (laarr[curr].extLength >> 30);
/* Shift following entries right to make room: one slot if the split
 * yields two pieces, two slots if it yields three. */
575 	if (blen == 1)
577 	else if (!offset || blen == offset + 1)
579 	laarr[curr+2] = laarr[curr+1];
580 	laarr[curr+1] = laarr[curr];
582 	else
584 	laarr[curr+3] = laarr[curr+1];
585 	laarr[curr+2] = laarr[curr+1] = laarr[curr];
/* Leading piece: the 'offset' blocks before the requested one.  If
 * they were allocated (but unrecorded) give the blocks back and leave
 * a not-allocated hole instead. */
588 	if (offset)
590 	if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
592 	udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
593 	laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
594 	(offset << inode->i_sb->s_blocksize_bits);
595 	laarr[curr].extLocation.logicalBlockNum = 0;
596 	laarr[curr].extLocation.partitionReferenceNum = 0;
598 	else
599 	laarr[curr].extLength = (etype << 30) |
600 	(offset << inode->i_sb->s_blocksize_bits);
601 	curr ++;
602 	(*c) ++;
603 	(*endnum) ++;
/* Middle piece: the requested block, now recorded + allocated. */
606 	laarr[curr].extLocation.logicalBlockNum = newblocknum;
607 	if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
608 	laarr[curr].extLocation.partitionReferenceNum =
609 	UDF_I_LOCATION(inode).partitionReferenceNum;
610 	laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
611 	inode->i_sb->s_blocksize;
612 	curr ++;
/* Trailing piece: whatever of the original extent lies after the
 * requested block, kept with its original type. */
614 	if (blen != offset + 1)
616 	if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
617 	laarr[curr].extLocation.logicalBlockNum += (offset + 1);
618 	laarr[curr].extLength = (etype << 30) |
619 	((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
620 	curr ++;
621 	(*endnum) ++;
/*
 * udf_prealloc_extents
 *
 * Preallocate up to UDF_DEFAULT_PREALLOC_BLOCKS blocks immediately
 * after the just-allocated block (laarr[c]) to reduce fragmentation
 * for sequential writes.  The preallocated run is recorded as a
 * not-recorded/allocated extent, consuming (shrinking or removing)
 * any following not-recorded extents it overlaps.
 */
626 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
627 	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
629 	int start, length = 0, currlength = 0, i;
/* Decide where the preallocated run attaches: extend an existing
 * not-recorded/allocated extent at c+1 if present, else start a new
 * one after laarr[c]. */
631 	if (*endnum >= (c+1))
633 	if (!lastblock)
634 	return;
635 	else
636 	start = c;
638 	else
640 	if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
642 	start = c+1;
643 	length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
644 	inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
646 	else
647 	start = c;
/* Measure how many blocks after 'start' could be covered: trailing
 * unallocated extents, plus a full quota at EOF. */
650 	for (i=start+1; i<=*endnum; i++)
652 	if (i == *endnum)
654 	if (lastblock)
655 	length += UDF_DEFAULT_PREALLOC_BLOCKS;
657 	else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
658 	length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
659 	inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
660 	else
661 	break;
664 	if (length)
666 	int next = laarr[start].extLocation.logicalBlockNum +
667 	(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
668 	inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
669 	int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
670 	laarr[start].extLocation.partitionReferenceNum,
671 	next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
672 	UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
674 	if (numalloc)
676 	if (start == (c+1))
677 	laarr[start].extLength +=
678 	(numalloc << inode->i_sb->s_blocksize_bits);
679 	else
/* Insert a fresh not-recorded/allocated extent right after laarr[c]
 * to hold the preallocated run. */
681 	memmove(&laarr[c+2], &laarr[c+1],
682 	sizeof(long_ad) * (*endnum - (c+1)));
683 	(*endnum) ++;
684 	laarr[c+1].extLocation.logicalBlockNum = next;
685 	laarr[c+1].extLocation.partitionReferenceNum =
686 	laarr[c].extLocation.partitionReferenceNum;
687 	laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
688 	(numalloc << inode->i_sb->s_blocksize_bits);
689 	start = c+1;
/* The preallocated run displaces following unallocated extents:
 * shrink the first partially-covered one, drop fully-covered ones. */
692 	for (i=start+1; numalloc && i<*endnum; i++)
694 	int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
695 	inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
697 	if (elen > numalloc)
699 	laarr[i].extLength -=
700 	(numalloc << inode->i_sb->s_blocksize_bits);
701 	numalloc = 0;
703 	else
705 	numalloc -= elen;
706 	if (*endnum > (i+1))
707 	memmove(&laarr[i], &laarr[i+1],
708 	sizeof(long_ad) * (*endnum - (i+1)));
709 	i --;
710 	(*endnum) --;
713 	UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
718 static void udf_merge_extents(struct inode *inode,
719 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
721 int i;
723 for (i=0; i<(*endnum-1); i++)
725 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
727 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
728 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
729 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
730 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
732 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
733 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
734 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
736 laarr[i+1].extLength = (laarr[i+1].extLength -
737 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
738 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
739 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
740 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
741 laarr[i+1].extLocation.logicalBlockNum =
742 laarr[i].extLocation.logicalBlockNum +
743 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
744 inode->i_sb->s_blocksize_bits);
746 else
748 laarr[i].extLength = laarr[i+1].extLength +
749 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
750 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
751 if (*endnum > (i+2))
752 memmove(&laarr[i+1], &laarr[i+2],
753 sizeof(long_ad) * (*endnum - (i+2)));
754 i --;
755 (*endnum) --;
759 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
760 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
762 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
763 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
764 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
765 laarr[i].extLocation.logicalBlockNum = 0;
766 laarr[i].extLocation.partitionReferenceNum = 0;
768 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
769 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
770 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
772 laarr[i+1].extLength = (laarr[i+1].extLength -
773 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
774 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
775 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
776 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
778 else
780 laarr[i].extLength = laarr[i+1].extLength +
781 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
782 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
783 if (*endnum > (i+2))
784 memmove(&laarr[i+1], &laarr[i+2],
785 sizeof(long_ad) * (*endnum - (i+2)));
786 i --;
787 (*endnum) --;
790 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
792 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
793 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
794 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
795 laarr[i].extLocation.logicalBlockNum = 0;
796 laarr[i].extLocation.partitionReferenceNum = 0;
797 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
798 EXT_NOT_RECORDED_NOT_ALLOCATED;
/*
 * udf_update_extents
 *
 * Write the rebuilt laarr[0..endnum) window back over the on-disk
 * extent list starting at (pbloc, pextoffset).  startnum is how many
 * extents the window replaced: surplus old extents are deleted,
 * missing slots are inserted, and the rest are overwritten in place.
 */
803 static void udf_update_extents(struct inode *inode,
804 	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
805 	kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
807 	int start = 0, i;
808 	kernel_lb_addr tmploc;
809 	uint32_t tmplen;
/* Fewer extents than before: delete the surplus descriptors. */
811 	if (startnum > endnum)
813 	for (i=0; i<(startnum-endnum); i++)
815 	udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
816 	laarr[i].extLength, *pbh);
/* More extents than before: insert the extras, then step past them. */
819 	else if (startnum < endnum)
821 	for (i=0; i<(endnum-startnum); i++)
823 	udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
824 	laarr[i].extLength, *pbh);
825 	udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
826 	&laarr[i].extLength, pbh, 1);
827 	start ++;
/* Overwrite the remaining descriptors one-for-one. */
831 	for (i=start; i<endnum; i++)
833 	udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
834 	udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
835 	laarr[i].extLength, *pbh, 1);
/*
 * udf_bread
 *
 * Like bread() for a UDF file block: map (optionally allocating) the
 * block via udf_getblk(), then read it from disk if it is not already
 * up to date.  Returns the buffer_head, or NULL with *err set on
 * mapping or I/O failure.
 */
839 struct buffer_head * udf_bread(struct inode * inode, int block,
840 	int create, int * err)
842 	struct buffer_head * bh = NULL;
844 	bh = udf_getblk(inode, block, create, err);
845 	if (!bh)
846 	return NULL;
848 	if (buffer_uptodate(bh))
849 	return bh;
/* Not cached: issue a synchronous read and re-check. */
850 	ll_rw_block(READ, 1, &bh);
851 	wait_on_buffer(bh);
852 	if (buffer_uptodate(bh))
853 	return bh;
854 	brelse(bh);
855 	*err = -EIO;
856 	return NULL;
/*
 * udf_truncate
 *
 * Truncate 'inode' to its current i_size.  In-ICB files either have
 * their tail zeroed in place, or — when the new size no longer fits
 * in the ICB — are first expanded to extent form; extent-mapped files
 * get their last partial block zeroed and surplus extents freed.
 * No-op for non-regular/dir/symlink inodes and append/immutable ones.
 */
859 void udf_truncate(struct inode * inode)
861 	int offset;
862 	int err;
864 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
865 	S_ISLNK(inode->i_mode)))
866 	return;
867 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
868 	return;
870 	lock_kernel();
871 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* New size no longer fits in the ICB: convert to extent form first.
 * If the conversion left it in-ICB, it failed — clamp i_size back. */
873 	if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
874 	inode->i_size))
876 	udf_expand_file_adinicb(inode, inode->i_size, &err);
877 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
879 	inode->i_size = UDF_I_LENALLOC(inode);
880 	unlock_kernel();
881 	return;
883 	else
884 	udf_truncate_extents(inode);
886 	else
/* Still fits in the ICB: zero everything past the new size. */
888 	offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
889 	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
890 	UDF_I_LENALLOC(inode) = inode->i_size;
893 	else
/* Extent-mapped: zero the tail of the final partial block, then drop
 * extents past EOF. */
895 	block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
896 	udf_truncate_extents(inode);
899 	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
900 	if (IS_SYNC(inode))
901 	udf_sync_inode (inode);
902 	else
903 	mark_inode_dirty(inode);
904 	unlock_kernel();
/*
 * __udf_read_inode
 *
 * Read the (extended) file entry for 'inode' from disk and hand it to
 * udf_fill_inode().  Follows strategy-4096 indirect ICBs by updating
 * UDF_I_LOCATION and recursing; any read or identification failure
 * marks the inode bad.
 */
907 static void
908 __udf_read_inode(struct inode *inode)
910 	struct buffer_head *bh = NULL;
911 	struct fileEntry *fe;
912 	uint16_t ident;
915 	* Set defaults, but the inode is still incomplete!
916 	* Note: get_new_inode() sets the following on a new inode:
917 	* i_sb = sb
918 	* i_no = ino
919 	* i_flags = sb->s_flags
920 	* i_state = 0
921 	* clean_inode(): zero fills and sets
922 	* i_count = 1
923 	* i_nlink = 1
924 	* i_op = NULL;
926 	inode->i_blksize = PAGE_SIZE;
928 	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
930 	if (!bh)
932 	printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
933 	inode->i_ino);
934 	make_bad_inode(inode);
935 	return;
938 	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
939 	ident != TAG_IDENT_USE)
941 	printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
942 	inode->i_ino, ident);
943 	udf_release_data(bh);
944 	make_bad_inode(inode);
945 	return;
948 	fe = (struct fileEntry *)bh->b_data;
/* Strategy 4096: the entry at sector+1 may be an indirect entry
 * pointing at the real file entry; follow it and restart the read. */
950 	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
952 	struct buffer_head *ibh = NULL, *nbh = NULL;
953 	struct indirectEntry *ie;
955 	ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
956 	if (ident == TAG_IDENT_IE)
958 	if (ibh)
960 	kernel_lb_addr loc;
961 	ie = (struct indirectEntry *)ibh->b_data;
963 	loc = lelb_to_cpu(ie->indirectICB.extLocation);
965 	if (ie->indirectICB.extLength &&
966 	(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
968 	if (ident == TAG_IDENT_FE ||
969 	ident == TAG_IDENT_EFE)
/* Indirect target is a real (E)FE: repoint the inode and recurse. */
971 	memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
972 	udf_release_data(bh);
973 	udf_release_data(ibh);
974 	udf_release_data(nbh);
975 	__udf_read_inode(inode);
976 	return;
978 	else
980 	udf_release_data(nbh);
981 	udf_release_data(ibh);
984 	else
985 	udf_release_data(ibh);
988 	else
989 	udf_release_data(ibh);
/* Only strategy types 4 and 4096 are supported. */
991 	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
993 	printk(KERN_ERR "udf: unsupported strategy type: %d\n",
994 	le16_to_cpu(fe->icbTag.strategyType));
995 	udf_release_data(bh);
996 	make_bad_inode(inode);
997 	return;
999 	udf_fill_inode(inode, bh);
1000 	udf_release_data(bh);
1003 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1005 struct fileEntry *fe;
1006 struct extendedFileEntry *efe;
1007 time_t convtime;
1008 long convtime_usec;
1009 int offset;
1011 fe = (struct fileEntry *)bh->b_data;
1012 efe = (struct extendedFileEntry *)bh->b_data;
1014 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1015 UDF_I_STRAT4096(inode) = 0;
1016 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1017 UDF_I_STRAT4096(inode) = 1;
1019 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1020 UDF_I_UNIQUE(inode) = 0;
1021 UDF_I_LENEATTR(inode) = 0;
1022 UDF_I_LENEXTENTS(inode) = 0;
1023 UDF_I_LENALLOC(inode) = 0;
1024 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1025 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1026 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1028 UDF_I_EFE(inode) = 1;
1029 UDF_I_USE(inode) = 0;
1030 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1031 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1033 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1035 UDF_I_EFE(inode) = 0;
1036 UDF_I_USE(inode) = 0;
1037 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1038 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1040 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1042 UDF_I_EFE(inode) = 0;
1043 UDF_I_USE(inode) = 1;
1044 UDF_I_LENALLOC(inode) =
1045 le32_to_cpu(
1046 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1047 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1048 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1049 return;
1052 inode->i_uid = le32_to_cpu(fe->uid);
1053 if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1055 inode->i_gid = le32_to_cpu(fe->gid);
1056 if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1058 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1059 if (!inode->i_nlink)
1060 inode->i_nlink = 1;
1062 inode->i_size = le64_to_cpu(fe->informationLength);
1063 UDF_I_LENEXTENTS(inode) = inode->i_size;
1065 inode->i_mode = udf_convert_permissions(fe);
1066 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1068 if (UDF_I_EFE(inode) == 0)
1070 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1071 (inode->i_sb->s_blocksize_bits - 9);
1073 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1074 lets_to_cpu(fe->accessTime)) )
1076 inode->i_atime.tv_sec = convtime;
1077 inode->i_atime.tv_nsec = convtime_usec * 1000;
1079 else
1081 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1084 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1085 lets_to_cpu(fe->modificationTime)) )
1087 inode->i_mtime.tv_sec = convtime;
1088 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1090 else
1092 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1095 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1096 lets_to_cpu(fe->attrTime)) )
1098 inode->i_ctime.tv_sec = convtime;
1099 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1101 else
1103 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1106 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1107 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1108 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1109 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1111 else
1113 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1114 (inode->i_sb->s_blocksize_bits - 9);
1116 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1117 lets_to_cpu(efe->accessTime)) )
1119 inode->i_atime.tv_sec = convtime;
1120 inode->i_atime.tv_nsec = convtime_usec * 1000;
1122 else
1124 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1127 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1128 lets_to_cpu(efe->modificationTime)) )
1130 inode->i_mtime.tv_sec = convtime;
1131 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1133 else
1135 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1138 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1139 lets_to_cpu(efe->createTime)) )
1141 UDF_I_CRTIME(inode).tv_sec = convtime;
1142 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1144 else
1146 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1149 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1150 lets_to_cpu(efe->attrTime)) )
1152 inode->i_ctime.tv_sec = convtime;
1153 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1155 else
1157 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1160 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1161 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1162 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1163 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1166 switch (fe->icbTag.fileType)
1168 case ICBTAG_FILE_TYPE_DIRECTORY:
1170 inode->i_op = &udf_dir_inode_operations;
1171 inode->i_fop = &udf_dir_operations;
1172 inode->i_mode |= S_IFDIR;
1173 inode->i_nlink ++;
1174 break;
1176 case ICBTAG_FILE_TYPE_REALTIME:
1177 case ICBTAG_FILE_TYPE_REGULAR:
1178 case ICBTAG_FILE_TYPE_UNDEF:
1180 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1181 inode->i_data.a_ops = &udf_adinicb_aops;
1182 else
1183 inode->i_data.a_ops = &udf_aops;
1184 inode->i_op = &udf_file_inode_operations;
1185 inode->i_fop = &udf_file_operations;
1186 inode->i_mode |= S_IFREG;
1187 break;
1189 case ICBTAG_FILE_TYPE_BLOCK:
1191 inode->i_mode |= S_IFBLK;
1192 break;
1194 case ICBTAG_FILE_TYPE_CHAR:
1196 inode->i_mode |= S_IFCHR;
1197 break;
1199 case ICBTAG_FILE_TYPE_FIFO:
1201 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1202 break;
1204 case ICBTAG_FILE_TYPE_SOCKET:
1206 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1207 break;
1209 case ICBTAG_FILE_TYPE_SYMLINK:
1211 inode->i_data.a_ops = &udf_symlink_aops;
1212 inode->i_op = &page_symlink_inode_operations;
1213 inode->i_mode = S_IFLNK|S_IRWXUGO;
1214 break;
1216 default:
1218 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1219 inode->i_ino, fe->icbTag.fileType);
1220 make_bad_inode(inode);
1221 return;
1224 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1226 struct deviceSpec *dsea =
1227 (struct deviceSpec *)
1228 udf_get_extendedattr(inode, 12, 1);
1230 if (dsea)
1232 init_special_inode(inode, inode->i_mode, MKDEV(
1233 le32_to_cpu(dsea->majorDeviceIdent),
1234 le32_to_cpu(dsea->minorDeviceIdent)));
1235 /* Developer ID ??? */
1237 else
1239 make_bad_inode(inode);
1244 static mode_t
1245 udf_convert_permissions(struct fileEntry *fe)
1247 mode_t mode;
1248 uint32_t permissions;
1249 uint32_t flags;
1251 permissions = le32_to_cpu(fe->permissions);
1252 flags = le16_to_cpu(fe->icbTag.flags);
1254 mode = (( permissions ) & S_IRWXO) |
1255 (( permissions >> 2 ) & S_IRWXG) |
1256 (( permissions >> 4 ) & S_IRWXU) |
1257 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1258 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1259 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1261 return mode;
1265 * udf_write_inode
1267 * PURPOSE
1268 * Write out the specified inode.
1270 * DESCRIPTION
1271 * This routine is called whenever an inode is synced.
 *	It serializes against other UDF metadata updates and delegates the
 *	actual on-disk serialization to udf_update_inode().
1274 * HISTORY
1275 * July 1, 1997 - Andrew E. Mileski
1276 * Written, tested, and released.
/*
 * Write the inode's on-disk file entry.  Takes the BKL because
 * udf_update_inode() walks and rewrites shared on-disk structures.
 * Returns 0 on success or a negative errno from udf_update_inode().
 */
int udf_write_inode(struct inode * inode, int sync)
{
	int ret;

	lock_kernel();
	ret = udf_update_inode(inode, sync);
	unlock_kernel();

	return ret;
}
/*
 * Synchronously write the inode: do_sync=1 makes udf_update_inode() wait
 * for the buffer to reach disk.  NOTE(review): unlike udf_write_inode()
 * this path does not take the BKL — presumably callers hold it; confirm.
 */
int udf_sync_inode(struct inode * inode)
{
	return udf_update_inode(inode, 1);
}
/*
 * Serialize the in-core inode into its on-disk (extended) file entry.
 *
 * Reads the descriptor block, zeroes it, rebuilds either a fileEntry or an
 * extendedFileEntry (chosen by UDF_I_EFE()), recomputes the tag CRC and
 * checksum, and marks the buffer dirty (waiting for I/O when do_sync).
 * Returns 0 on success, -EIO on read or sync failure.
 */
static int
udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int i;
	kernel_timestamp cpu_time;
	int err = 0;

	bh = udf_tread(inode->i_sb,
		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));

	if (!bh)
	{
		udf_debug("bread failure\n");
		return -EIO;
	}

	/* The whole block is rebuilt from scratch below. */
	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);

	/* fe and efe alias the same buffer; which layout is valid depends
	 * on UDF_I_EFE() further down. */
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	/* NOTE(review): b_data was just zeroed, so tagIdent reads as 0 here
	 * and this branch can never trigger — looks like it should key off
	 * in-core state (e.g. an "unalloc space entry" flag) instead; verify
	 * against the strategy-4096 read path. */
	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
	{
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
			sizeof(tag);
		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
		use->descTag.descCRCLength = cpu_to_le16(crclen);
		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));

		/* Tag checksum covers all 16 tag bytes except the checksum
		 * byte itself (offset 4). */
		use->descTag.tagChecksum = 0;
		for (i=0; i<16; i++)
			if (i != 4)
				use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];

		mark_buffer_dirty(bh);
		udf_release_data(bh);
		return err;
	}

	/* Only record uid/gid that differ from the mount defaults. */
	if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
		fe->uid = cpu_to_le32(inode->i_uid);

	if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
		fe->gid = cpu_to_le32(inode->i_gid);

	/* POSIX rwx triplets map to UDF with 2-bit gaps (inverse of
	 * udf_convert_permissions()). */
	udfperms =	((inode->i_mode & S_IRWXO)     ) |
			((inode->i_mode & S_IRWXG) << 2) |
			((inode->i_mode & S_IRWXU) << 4);

	/* Preserve the UDF-only delete/chattr permission bits already in
	 * the (zeroed) entry. */
	udfperms |=	(le32_to_cpu(fe->permissions) &
			(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
			 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
			 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
	fe->permissions = cpu_to_le32(udfperms);

	/* On-disk link count for directories excludes the '.' self-link
	 * that the in-core nlink carries. */
	if (S_ISDIR(inode->i_mode))
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
	{
		regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)
				udf_get_extendedattr(inode, 12, 1);

		/* Create the device-spec extended attribute if absent. */
		if (!dsea)
		{
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
					sizeof(struct deviceSpec) +
					sizeof(regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
				sizeof(regid));
			dsea->impUseLength = cpu_to_le32(sizeof(regid));
		}
		eid = (regid *)dsea->impUse;
		memset(eid, 0, sizeof(regid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (UDF_I_EFE(inode) == 0)
	{
		/* Plain fileEntry layout: allocation descriptors / in-ICB
		 * data follow the fixed header. */
		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		/* i_blocks counts 512-byte sectors; round up to fs blocks. */
		fe->logicalBlocksRecorded = cpu_to_le64(
			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
			(inode->i_sb->s_blocksize_bits - 9));

		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
			fe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
			fe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
			fe->attrTime = cpu_to_lets(cpu_time);
		memset(&(fe->impIdent), 0, sizeof(regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	}
	else
	{
		/* Extended file entry: also carries objectSize and an
		 * explicit creation time. */
		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
		efe->objectSize = cpu_to_le64(inode->i_size);
		efe->logicalBlocksRecorded = cpu_to_le64(
			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
			(inode->i_sb->s_blocksize_bits - 9));

		/* Clamp creation time so it never post-dates atime, mtime
		 * or ctime. */
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_atime;
		}
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_mtime;
		}
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_ctime;
		}

		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
			efe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
			efe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
			efe->createTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
			efe->attrTime = cpu_to_lets(cpu_time);

		memset(&(efe->impIdent), 0, sizeof(regid));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}
	if (UDF_I_STRAT4096(inode))
	{
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	}
	else
	{
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	/* Map the POSIX file type to the ICB tag file type. */
	if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	/* Rebuild the flag word: allocation type + suid/sgid/sticky,
	 * keeping any other flag bits already present. */
	icbflags =	UDF_I_ALLOCTYPE(inode) |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	/* Descriptor version 3 for UDF >= 2.00 media, else 2. */
	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
	/* CRC covers header + EAs + alloc descs, minus the tag itself. */
	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));

	/* Tag checksum: sum of tag bytes, skipping the checksum byte. */
	fe->descTag.tagChecksum = 0;
	for (i=0; i<16; i++)
		if (i != 4)
			fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync)
	{
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh))
		{
			printk("IO error syncing udf inode [%s:%08lx]\n",
				inode->i_sb->s_id, inode->i_ino);
			err = -EIO;
		}
	}
	udf_release_data(bh);
	return err;
}
/*
 * Look up (or read in) the inode at the given logical-block address.
 * Uses iget_locked() keyed on the physical block; a freshly created
 * (I_NEW) inode is populated via __udf_read_inode().
 * Returns the inode or NULL on failure (bad inode or out-of-range block).
 */
struct inode *
udf_iget(struct super_block *sb, kernel_lb_addr ino)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);

	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		/* Stash the on-disk location before reading the entry. */
		memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
		__udf_read_inode(inode);
		unlock_new_inode(inode);
	}

	if (is_bad_inode(inode))
		goto out_iput;

	/* NOTE(review): this range check runs after the inode has already
	 * been read from disk — consider validating before __udf_read_inode. */
	if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
		udf_debug("block=%d, partition=%d out of range\n",
			ino.logicalBlockNum, ino.partitionReferenceNum);
		make_bad_inode(inode);
		goto out_iput;
	}

	return inode;

 out_iput:
	iput(inode);
	return NULL;
}
/*
 * Append an allocation extent (eloc, elen) at *extoffset.
 *
 * Descriptors live either inline in the ICB (*bh == NULL) or in an
 * allocation extent descriptor block (*bh set).  If the current area
 * cannot hold two more descriptors, a new AED block is allocated, the
 * chain is linked with an EXT_NEXT_EXTENT_ALLOCDECS descriptor, and
 * *bloc/*extoffset/*bh are redirected into the new block.
 * Returns the extent type written, or -1 on failure.
 */
int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
{
	int adsize;
	short_ad *sad = NULL;
	long_ad *lad = NULL;
	struct allocExtDesc *aed;
	int8_t etype;
	uint8_t *ptr;

	/* Locate the write position: inline in the ICB or inside the AED. */
	if (!*bh)
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
		ptr = (*bh)->b_data + *extoffset;

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return -1;

	/* Need room for this descriptor plus a possible chain descriptor. */
	if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
	{
		char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		kernel_lb_addr obloc = *bloc;

		if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
		{
			return -1;
		}
		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
			*bloc, 0))))
		{
			return -1;
		}
		lock_buffer(nbh);
		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(nbh);
		unlock_buffer(nbh);
		mark_buffer_dirty_inode(nbh, inode);

		aed = (struct allocExtDesc *)(nbh->b_data);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
		if (*extoffset + adsize > inode->i_sb->s_blocksize)
		{
			/* No room even for one descriptor: move the last one
			 * written into the new AED block and chain from its
			 * old slot. */
			loffset = *extoffset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = ptr - adsize;
			dptr = nbh->b_data + sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			*extoffset = sizeof(struct allocExtDesc) + adsize;
		}
		else
		{
			/* Room for exactly one: the chain descriptor goes in
			 * the old area, new writes start in the new block. */
			loffset = *extoffset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = ptr;
			*extoffset = sizeof(struct allocExtDesc);

			if (*bh)
			{
				aed = (struct allocExtDesc *)(*bh)->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
			}
			else
			{
				UDF_I_LENALLOC(inode) += adsize;
				mark_inode_dirty(inode);
			}
		}
		/* Tag the new AED (descriptor version per media UDF rev). */
		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
				bloc->logicalBlockNum, sizeof(tag));
		else
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
				bloc->logicalBlockNum, sizeof(tag));
		/* Write the chain descriptor pointing at the new AED block. */
		switch (UDF_I_ALLOCTYPE(inode))
		{
			case ICBTAG_FLAG_AD_SHORT:
			{
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
				break;
			}
			case ICBTAG_FLAG_AD_LONG:
			{
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(*bloc);
				memset(lad->impUse, 0x00, sizeof(lad->impUse));
				break;
			}
		}
		if (*bh)
		{
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((*bh)->b_data, loffset);
			else
				udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(*bh, inode);
			udf_release_data(*bh);
		}
		else
			mark_inode_dirty(inode);
		/* From here on the caller operates inside the new AED. */
		*bh = nbh;
	}

	etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);

	/* Account for the descriptor just written. */
	if (!*bh)
	{
		UDF_I_LENALLOC(inode) += adsize;
		mark_inode_dirty(inode);
	}
	else
	{
		aed = (struct allocExtDesc *)(*bh)->b_data;
		aed->lengthAllocDescs =
			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
			udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
		else
			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(*bh, inode);
	}

	return etype;
}
/*
 * Write one allocation descriptor (eloc, elen) at *extoffset, either
 * inline in the ICB (bh == NULL) or inside an AED block (bh set; its
 * refcount is bumped and dropped locally so the caller's reference
 * survives).  Advances *extoffset by one descriptor when inc is set.
 * Returns the extent type (top two bits of elen), or -1 for an
 * unsupported allocation type.
 */
int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
    kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
{
	int adsize;
	uint8_t *ptr;

	if (!bh)
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
	{
		ptr = bh->b_data + *extoffset;
		/* Balanced by udf_release_data() below. */
		atomic_inc(&bh->b_count);
	}

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad = (short_ad *)ptr;
			sad->extLength = cpu_to_le32(elen);
			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
			adsize = sizeof(short_ad);
			break;
		}
		case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad = (long_ad *)ptr;
			lad->extLength = cpu_to_le32(elen);
			lad->extLocation = cpu_to_lelb(eloc);
			memset(lad->impUse, 0x00, sizeof(lad->impUse));
			adsize = sizeof(long_ad);
			break;
		}
		default:
			return -1;
	}

	if (bh)
	{
		/* Refresh the AED tag CRC on newer/lenient media. */
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
		{
			struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
			udf_update_tag((bh)->b_data,
				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(bh, inode);
		udf_release_data(bh);
	}
	else
		mark_inode_dirty(inode);

	if (inc)
		*extoffset += adsize;
	return (elen >> 30);
}
/*
 * Fetch the next allocation extent, transparently following
 * EXT_NEXT_EXTENT_ALLOCDECS chain descriptors into continuation AED
 * blocks (updating *bloc, *extoffset and swapping *bh to the new block).
 * Returns the extent type, or -1 on end/read failure.
 */
int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
{
	int8_t etype;

	while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
	{
		/* Chain descriptor: jump into the continuation block. */
		*bloc = *eloc;
		*extoffset = sizeof(struct allocExtDesc);
		udf_release_data(*bh);
		if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
		{
			udf_debug("reading block %d failed!\n",
				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
			return -1;
		}
	}

	return etype;
}
/*
 * Decode the allocation descriptor at *extoffset into (*eloc, *elen) and
 * return its extent type (top two bits of extLength).  A zero *extoffset
 * is initialized to the first descriptor of the current area (ICB or AED
 * block).  Does NOT follow chain descriptors — udf_next_aext() does.
 * Returns -1 when past the end of the descriptor area or on an
 * unsupported allocation type.
 */
int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;

	if (!*bh)
	{
		/* Descriptors inline in the ICB after the extended attrs. */
		if (!(*extoffset))
			*extoffset = udf_file_entry_alloc_offset(inode);
		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
	}
	else
	{
		/* Descriptors inside an allocation extent descriptor block. */
		if (!(*extoffset))
			*extoffset = sizeof(struct allocExtDesc);
		ptr = (*bh)->b_data + *extoffset;
		alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
	}

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad;

			if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(sad->extLength) >> 30;
			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
			/* short_ad carries no partition: it is implicitly the
			 * inode's own partition. */
			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad;

			if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
				return -1;

			etype = le32_to_cpu(lad->extLength) >> 30;
			*eloc = lelb_to_cpu(lad->extLocation);
			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		default:
		{
			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
			return -1;
		}
	}

	return etype;
}
/*
 * Insert extent (neloc, nelen) at extoffset by rippling every following
 * descriptor one slot forward: each iteration writes the pending extent
 * and picks up the one it displaced; the last displaced extent is
 * appended via udf_add_aext().  Returns the type of the last extent
 * written.
 */
static int8_t
udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
	kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
{
	kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;

	/* udf_write_aext()/udf_next_aext() consume references; keep ours. */
	if (bh)
		atomic_inc(&bh->b_count);

	while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
	{
		udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);

		/* The displaced extent becomes the next one to insert. */
		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}
	udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
	udf_release_data(bh);
	return (nelen >> 30);
}
/*
 * Delete the extent at nextoffset by copying every following descriptor
 * one slot back (two cursors: "o" trails "n" by one descriptor), then
 * blanking the vacated slot(s) and shrinking lengthAllocDescs (or
 * UDF_I_LENALLOC for inline descriptors).  When the walk crossed into a
 * continuation AED block, that now-unreferenced block is freed and TWO
 * trailing slots are cleared (the removed extent plus the chain
 * descriptor).  Returns the type of the last extent handled.
 */
int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
	kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
{
	struct buffer_head *obh;
	kernel_lb_addr obloc;
	int oextoffset, adsize;
	int8_t etype;
	struct allocExtDesc *aed;

	/* Two extra refs: one for each cursor (obh starts aliasing nbh). */
	if (nbh)
	{
		atomic_inc(&nbh->b_count);
		atomic_inc(&nbh->b_count);
	}

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		adsize = 0;

	obh = nbh;
	obloc = nbloc;
	oextoffset = nextoffset;

	/* Skip over the extent being deleted. */
	if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
		return -1;

	/* Shift each remaining descriptor into the previous slot. */
	while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
	{
		udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
		if (obh != nbh)
		{
			/* Reader crossed into a new AED block; catch up. */
			obloc = nbloc;
			udf_release_data(obh);
			atomic_inc(&nbh->b_count);
			obh = nbh;
			oextoffset = nextoffset - adsize;
		}
	}
	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
	elen = 0;

	if (nbh != obh)
	{
		/* The tail AED block no longer holds live descriptors. */
		udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
		/* Clear two slots: the deleted extent and the chain entry. */
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= (adsize * 2);
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}
	else
	{
		/* Single area: clear one slot and shrink by one descriptor. */
		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		if (!obh)
		{
			UDF_I_LENALLOC(inode) -= adsize;
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)(obh)->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag((obh)->b_data, oextoffset - adsize);
			else
				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(obh, inode);
		}
	}

	udf_release_data(nbh);
	udf_release_data(obh);
	return (elen >> 30);
}
1954 int8_t inode_bmap(struct inode *inode, int block, kernel_lb_addr *bloc, uint32_t *extoffset,
1955 kernel_lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
1957 uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
1958 int8_t etype;
1960 if (block < 0)
1962 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1963 return -1;
1965 if (!inode)
1967 printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
1968 return -1;
1971 *extoffset = 0;
1972 *elen = 0;
1973 *bloc = UDF_I_LOCATION(inode);
1977 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
1979 *offset = bcount - lbcount;
1980 UDF_I_LENEXTENTS(inode) = lbcount;
1981 return -1;
1983 lbcount += *elen;
1984 } while (lbcount <= bcount);
1986 *offset = bcount + *elen - lbcount;
1988 return etype;
1991 long udf_block_map(struct inode *inode, long block)
1993 kernel_lb_addr eloc, bloc;
1994 uint32_t offset, extoffset, elen;
1995 struct buffer_head *bh = NULL;
1996 int ret;
1998 lock_kernel();
2000 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
2001 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
2002 else
2003 ret = 0;
2005 unlock_kernel();
2006 udf_release_data(bh);
2008 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2009 return udf_fixed_to_variable(ret);
2010 else
2011 return ret;