5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hootie.lvld.hp.com
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2000 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
37 #include <linux/locks.h>
39 #include <linux/smp_lock.h>
44 #define EXTENT_MERGE_SIZE 5
46 static mode_t
udf_convert_permissions(struct FileEntry
*);
47 static int udf_update_inode(struct inode
*, int);
48 static void udf_fill_inode(struct inode
*, struct buffer_head
*);
49 static struct buffer_head
*inode_getblk(struct inode
*, long, int *, long *, int *);
50 static void udf_split_extents(struct inode
*, int *, int, int,
51 long_ad
[EXTENT_MERGE_SIZE
], int *);
52 static void udf_prealloc_extents(struct inode
*, int, int,
53 long_ad
[EXTENT_MERGE_SIZE
], int *);
54 static void udf_merge_extents(struct inode
*,
55 long_ad
[EXTENT_MERGE_SIZE
], int *);
56 static void udf_update_extents(struct inode
*,
57 long_ad
[EXTENT_MERGE_SIZE
], int, int,
58 lb_addr
, Uint32
, struct buffer_head
**);
59 static int udf_get_block(struct inode
*, long, struct buffer_head
*, int);
67 * This routine is called whenever the kernel no longer needs the inode.
70 * July 1, 1997 - Andrew E. Mileski
71 * Written, tested, and released.
73 * Called at each iput()
75 void udf_put_inode(struct inode
* inode
)
77 if (!(inode
->i_sb
->s_flags
& MS_RDONLY
))
80 udf_discard_prealloc(inode
);
81 /* write the root inode on put, if dirty */
82 if (!inode
->i_sb
->s_root
&& inode
->i_state
& I_DIRTY
)
83 udf_update_inode(inode
, IS_SYNC(inode
));
92 * Clean-up before the specified inode is destroyed.
95 * This routine is called when the kernel destroys an inode structure
96 * ie. when iput() finds i_count == 0.
99 * July 1, 1997 - Andrew E. Mileski
100 * Written, tested, and released.
102 * Called at the last iput() if i_nlink is zero.
104 void udf_delete_inode(struct inode
* inode
)
108 if (is_bad_inode(inode
))
116 udf_update_inode(inode
, IS_SYNC(inode
));
117 udf_free_inode(inode
);
122 void udf_discard_prealloc(struct inode
* inode
)
124 if (inode
->i_size
&& UDF_I_ALLOCTYPE(inode
) != ICB_FLAG_AD_IN_ICB
)
128 static int udf_writepage(struct file
*file
, struct page
*page
)
130 return block_write_full_page(page
, udf_get_block
);
133 static int udf_readpage(struct file
*file
, struct page
*page
)
135 return block_read_full_page(page
, udf_get_block
);
138 static int udf_prepare_write(struct file
*file
, struct page
*page
, unsigned from
, unsigned to
)
140 return block_prepare_write(page
, from
, to
, udf_get_block
);
143 static int udf_bmap(struct address_space
*mapping
, long block
)
145 return generic_block_bmap(mapping
,block
,udf_get_block
);
148 struct address_space_operations udf_aops
= {
149 readpage
: udf_readpage
,
150 writepage
: udf_writepage
,
151 sync_page
: block_sync_page
,
152 prepare_write
: udf_prepare_write
,
153 commit_write
: generic_commit_write
,
157 void udf_expand_file_adinicb(struct inode
* inode
, int newsize
, int * err
)
159 struct buffer_head
*bh
= NULL
;
161 unsigned long kaddr
= 0;
164 /* from now on we have normal address_space methods */
165 inode
->i_data
.a_ops
= &udf_aops
;
167 if (!UDF_I_LENALLOC(inode
))
169 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
170 UDF_I_ALLOCTYPE(inode
) = ICB_FLAG_AD_SHORT
;
172 UDF_I_ALLOCTYPE(inode
) = ICB_FLAG_AD_LONG
;
173 mark_inode_dirty(inode
);
177 block
= udf_get_lb_pblock(inode
->i_sb
, UDF_I_LOCATION(inode
), 0);
178 bh
= udf_tread(inode
->i_sb
, block
, inode
->i_sb
->s_blocksize
);
181 page
= grab_cache_page(inode
->i_mapping
, 0);
182 if (!PageLocked(page
))
184 if (!Page_Uptodate(page
))
187 memset((char *)kaddr
+ UDF_I_LENALLOC(inode
), 0x00,
188 PAGE_CACHE_SIZE
- UDF_I_LENALLOC(inode
));
189 memcpy((char *)kaddr
, bh
->b_data
+ udf_file_entry_alloc_offset(inode
),
190 UDF_I_LENALLOC(inode
));
191 flush_dcache_page(page
);
192 SetPageUptodate(page
);
195 memset(bh
->b_data
+ udf_file_entry_alloc_offset(inode
),
196 0, UDF_I_LENALLOC(inode
));
197 UDF_I_LENALLOC(inode
) = 0;
198 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
199 UDF_I_ALLOCTYPE(inode
) = ICB_FLAG_AD_SHORT
;
201 UDF_I_ALLOCTYPE(inode
) = ICB_FLAG_AD_LONG
;
202 inode
->i_blocks
= inode
->i_sb
->s_blocksize
/ 512;
203 mark_buffer_dirty(bh
);
204 udf_release_data(bh
);
206 inode
->i_data
.a_ops
->writepage(NULL
, page
);
208 page_cache_release(page
);
210 mark_inode_dirty(inode
);
214 struct buffer_head
* udf_expand_dir_adinicb(struct inode
*inode
, int *block
, int *err
)
217 struct buffer_head
*sbh
= NULL
, *dbh
= NULL
;
219 Uint32 elen
, extoffset
;
221 struct udf_fileident_bh sfibh
, dfibh
;
222 loff_t f_pos
= udf_ext0_offset(inode
) >> 2;
223 int size
= (udf_ext0_offset(inode
) + inode
->i_size
) >> 2;
224 struct FileIdentDesc cfi
, *sfi
, *dfi
;
228 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
229 UDF_I_ALLOCTYPE(inode
) = ICB_FLAG_AD_SHORT
;
231 UDF_I_ALLOCTYPE(inode
) = ICB_FLAG_AD_LONG
;
232 mark_inode_dirty(inode
);
236 /* alloc block, and copy data to it */
237 *block
= udf_new_block(inode
,
238 UDF_I_LOCATION(inode
).partitionReferenceNum
,
239 UDF_I_LOCATION(inode
).logicalBlockNum
, err
);
243 newblock
= udf_get_pblock(inode
->i_sb
, *block
,
244 UDF_I_LOCATION(inode
).partitionReferenceNum
, 0);
247 sbh
= udf_tread(inode
->i_sb
, inode
->i_ino
, inode
->i_sb
->s_blocksize
);
250 dbh
= udf_tread(inode
->i_sb
, newblock
, inode
->i_sb
->s_blocksize
);
254 sfibh
.soffset
= sfibh
.eoffset
= (f_pos
& ((inode
->i_sb
->s_blocksize
- 1) >> 2)) << 2;
255 sfibh
.sbh
= sfibh
.ebh
= sbh
;
256 dfibh
.soffset
= dfibh
.eoffset
= 0;
257 dfibh
.sbh
= dfibh
.ebh
= dbh
;
258 while ( (f_pos
< size
) )
260 sfi
= udf_fileident_read(inode
, &f_pos
, &sfibh
, &cfi
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
);
263 udf_release_data(sbh
);
264 udf_release_data(dbh
);
267 sfi
->descTag
.tagLocation
= *block
;
268 dfibh
.soffset
= dfibh
.eoffset
;
269 dfibh
.eoffset
+= (sfibh
.eoffset
- sfibh
.soffset
);
270 dfi
= (struct FileIdentDesc
*)(dbh
->b_data
+ dfibh
.soffset
);
271 if (udf_write_fi(sfi
, dfi
, &dfibh
, sfi
->impUse
,
272 sfi
->fileIdent
+ sfi
->lengthOfImpUse
))
274 udf_release_data(sbh
);
275 udf_release_data(dbh
);
279 mark_buffer_dirty(dbh
);
281 memset(sbh
->b_data
+ udf_file_entry_alloc_offset(inode
),
282 0, UDF_I_LENALLOC(inode
));
284 UDF_I_LENALLOC(inode
) = 0;
285 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_USE_SHORT_AD
))
286 UDF_I_ALLOCTYPE(inode
) = ICB_FLAG_AD_SHORT
;
288 UDF_I_ALLOCTYPE(inode
) = ICB_FLAG_AD_LONG
;
289 bloc
= UDF_I_LOCATION(inode
);
290 eloc
.logicalBlockNum
= *block
;
291 eloc
.partitionReferenceNum
= UDF_I_LOCATION(inode
).partitionReferenceNum
;
292 elen
= inode
->i_size
;
293 extoffset
= udf_file_entry_alloc_offset(inode
);
294 udf_add_aext(inode
, &bloc
, &extoffset
, eloc
, elen
, &sbh
, 0);
297 inode
->i_blocks
= inode
->i_sb
->s_blocksize
/ 512;
298 mark_buffer_dirty(sbh
);
299 udf_release_data(sbh
);
300 mark_inode_dirty(inode
);
305 static int udf_get_block(struct inode
*inode
, long block
, struct buffer_head
*bh_result
, int create
)
308 struct buffer_head
*bh
;
313 phys
= udf_block_map(inode
, block
);
316 bh_result
->b_dev
= inode
->i_dev
;
317 bh_result
->b_blocknr
= phys
;
318 bh_result
->b_state
|= (1UL << BH_Mapped
);
332 if (block
== UDF_I_NEXT_ALLOC_BLOCK(inode
) + 1)
334 UDF_I_NEXT_ALLOC_BLOCK(inode
) ++;
335 UDF_I_NEXT_ALLOC_GOAL(inode
) ++;
340 bh
= inode_getblk(inode
, block
, &err
, &phys
, &new);
348 bh_result
->b_dev
= inode
->i_dev
;
349 bh_result
->b_blocknr
= phys
;
350 bh_result
->b_state
|= (1UL << BH_Mapped
);
352 bh_result
->b_state
|= (1UL << BH_New
);
358 udf_warning(inode
->i_sb
, "udf_get_block", "block < 0");
362 struct buffer_head
* udf_getblk(struct inode
* inode
, long block
,
363 int create
, int * err
)
365 struct buffer_head dummy
;
369 dummy
.b_blocknr
= -1000;
370 error
= udf_get_block(inode
, block
, &dummy
, create
);
372 if (!error
& buffer_mapped(&dummy
))
374 struct buffer_head
*bh
;
375 bh
= getblk(dummy
.b_dev
, dummy
.b_blocknr
, inode
->i_sb
->s_blocksize
);
376 if (buffer_new(&dummy
))
378 if (!buffer_uptodate(bh
))
380 memset(bh
->b_data
, 0x00, inode
->i_sb
->s_blocksize
);
381 mark_buffer_uptodate(bh
, 1);
382 mark_buffer_dirty(bh
);
389 static struct buffer_head
* inode_getblk(struct inode
* inode
, long block
,
390 int *err
, long *phys
, int *new)
392 struct buffer_head
*pbh
= NULL
, *cbh
= NULL
, *result
= NULL
;
393 long_ad laarr
[EXTENT_MERGE_SIZE
];
394 Uint32 pextoffset
= 0, cextoffset
= 0, nextoffset
= 0;
395 int count
= 0, startnum
= 0, endnum
= 0;
397 lb_addr eloc
, pbloc
= UDF_I_LOCATION(inode
), cbloc
= UDF_I_LOCATION(inode
);
399 int lbcount
= 0, b_off
= 0, offset
= 0;
400 Uint32 newblocknum
, newblock
;
402 int goal
= 0, pgoal
= UDF_I_LOCATION(inode
).logicalBlockNum
;
405 pextoffset
= cextoffset
= nextoffset
= udf_file_entry_alloc_offset(inode
);
406 b_off
= block
<< inode
->i_sb
->s_blocksize_bits
;
407 pbloc
= cbloc
= UDF_I_LOCATION(inode
);
409 /* find the extent which contains the block we are looking for.
410 alternate between laarr[0] and laarr[1] for locations of the
411 current extent, and the previous extent */
416 udf_release_data(pbh
);
418 atomic_inc(&cbh
->b_count
);
424 pextoffset
= cextoffset
;
425 cextoffset
= nextoffset
;
427 if ((etype
= udf_next_aext(inode
, &cbloc
, &nextoffset
, &eloc
, &elen
, &cbh
, 1)) == -1)
432 laarr
[c
].extLength
= (etype
<< 30) | elen
;
433 laarr
[c
].extLocation
= eloc
;
435 if (etype
!= EXTENT_NOT_RECORDED_NOT_ALLOCATED
)
436 pgoal
= eloc
.logicalBlockNum
+
437 ((elen
+ inode
->i_sb
->s_blocksize
- 1) >>
438 inode
->i_sb
->s_blocksize
);
441 } while (lbcount
+ elen
<= b_off
);
444 offset
= b_off
>> inode
->i_sb
->s_blocksize_bits
;
446 /* if the extent is allocated and recorded, return the block
447 if the extent is not a multiple of the blocksize, round up */
449 if (etype
== EXTENT_RECORDED_ALLOCATED
)
451 if (elen
& (inode
->i_sb
->s_blocksize
- 1))
453 elen
= (EXTENT_RECORDED_ALLOCATED
<< 30) |
454 ((elen
+ inode
->i_sb
->s_blocksize
- 1) &
455 ~(inode
->i_sb
->s_blocksize
- 1));
456 etype
= udf_write_aext(inode
, cbloc
, &cextoffset
, eloc
, elen
, &cbh
, 1);
458 udf_release_data(pbh
);
459 udf_release_data(cbh
);
460 newblock
= udf_get_lb_pblock(inode
->i_sb
, eloc
, offset
);
467 endnum
= startnum
= ((count
> 1) ? 1 : count
);
468 if (laarr
[c
].extLength
& (inode
->i_sb
->s_blocksize
- 1))
471 (laarr
[c
].extLength
& UDF_EXTENT_FLAG_MASK
) |
472 (((laarr
[c
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
473 inode
->i_sb
->s_blocksize
- 1) &
474 ~(inode
->i_sb
->s_blocksize
- 1));
477 laarr
[c
].extLength
= (EXTENT_NOT_RECORDED_NOT_ALLOCATED
<< 30) |
478 ((offset
+ 1) << inode
->i_sb
->s_blocksize_bits
);
479 memset(&laarr
[c
].extLocation
, 0x00, sizeof(lb_addr
));
485 endnum
= startnum
= ((count
> 2) ? 2 : count
);
487 /* if the current extent is in position 0, swap it with the previous */
488 if (!c
&& count
!= 1)
496 /* if the current block is located in an extent, read the next extent */
499 if ((etype
= udf_next_aext(inode
, &cbloc
, &nextoffset
, &eloc
, &elen
, &cbh
, 0)) != -1)
501 laarr
[c
+1].extLength
= (etype
<< 30) | elen
;
502 laarr
[c
+1].extLocation
= eloc
;
510 udf_release_data(cbh
);
512 /* if the current extent is not recorded but allocated, get the
513 block in the extent corresponding to the requested block */
514 if ((laarr
[c
].extLength
>> 30) == EXTENT_NOT_RECORDED_ALLOCATED
)
515 newblocknum
= laarr
[c
].extLocation
.logicalBlockNum
+ offset
;
516 else /* otherwise, allocate a new block */
518 if (UDF_I_NEXT_ALLOC_BLOCK(inode
) == block
)
519 goal
= UDF_I_NEXT_ALLOC_GOAL(inode
);
524 goal
= UDF_I_LOCATION(inode
).logicalBlockNum
+ 1;
527 if (!(newblocknum
= udf_new_block(inode
,
528 UDF_I_LOCATION(inode
).partitionReferenceNum
, goal
, err
)))
530 udf_release_data(pbh
);
536 /* if the extent the requested block is located in contains multiple blocks,
537 split the extent into at most three extents. blocks prior to requested
538 block, requested block, and blocks after requested block */
539 udf_split_extents(inode
, &c
, offset
, newblocknum
, laarr
, &endnum
);
541 #ifdef UDF_PREALLOCATE
542 /* preallocate blocks */
543 udf_prealloc_extents(inode
, c
, lastblock
, laarr
, &endnum
);
546 /* merge any continuous blocks in laarr */
547 udf_merge_extents(inode
, laarr
, &endnum
);
549 /* write back the new extents, inserting new extents if the new number
550 of extents is greater than the old number, and deleting extents if
551 the new number of extents is less than the old number */
552 udf_update_extents(inode
, laarr
, startnum
, endnum
, pbloc
, pextoffset
, &pbh
);
554 udf_release_data(pbh
);
556 if (!(newblock
= udf_get_pblock(inode
->i_sb
, newblocknum
,
557 UDF_I_LOCATION(inode
).partitionReferenceNum
, 0)))
564 UDF_I_NEXT_ALLOC_BLOCK(inode
) = block
;
565 UDF_I_NEXT_ALLOC_GOAL(inode
) = newblocknum
;
566 inode
->i_ctime
= CURRENT_TIME
;
567 UDF_I_UCTIME(inode
) = CURRENT_UTIME
;
568 inode
->i_blocks
+= inode
->i_sb
->s_blocksize
/ 512;
570 udf_sync_inode(inode
);
572 mark_inode_dirty(inode
);
576 static void udf_split_extents(struct inode
*inode
, int *c
, int offset
, int newblocknum
,
577 long_ad laarr
[EXTENT_MERGE_SIZE
], int *endnum
)
579 if ((laarr
[*c
].extLength
>> 30) == EXTENT_NOT_RECORDED_ALLOCATED
||
580 (laarr
[*c
].extLength
>> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED
)
583 int blen
= ((laarr
[curr
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
584 inode
->i_sb
->s_blocksize
- 1) >> inode
->i_sb
->s_blocksize_bits
;
585 int type
= laarr
[curr
].extLength
& ~UDF_EXTENT_LENGTH_MASK
;
589 else if (!offset
|| blen
== offset
+ 1)
591 laarr
[curr
+2] = laarr
[curr
+1];
592 laarr
[curr
+1] = laarr
[curr
];
596 laarr
[curr
+3] = laarr
[curr
+1];
597 laarr
[curr
+2] = laarr
[curr
+1] = laarr
[curr
];
602 laarr
[curr
].extLength
= type
|
603 (offset
<< inode
->i_sb
->s_blocksize_bits
);
609 laarr
[curr
].extLocation
.logicalBlockNum
= newblocknum
;
610 if ((type
>> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED
)
611 laarr
[curr
].extLocation
.partitionReferenceNum
=
612 UDF_I_LOCATION(inode
).partitionReferenceNum
;
613 laarr
[curr
].extLength
= (EXTENT_RECORDED_ALLOCATED
<< 30) |
614 inode
->i_sb
->s_blocksize
;
617 if (blen
!= offset
+ 1)
619 if ((type
>> 30) == EXTENT_NOT_RECORDED_ALLOCATED
)
620 laarr
[curr
].extLocation
.logicalBlockNum
+= (offset
+ 1);
621 laarr
[curr
].extLength
= type
|
622 ((blen
- (offset
+ 1)) << inode
->i_sb
->s_blocksize_bits
);
629 static void udf_prealloc_extents(struct inode
*inode
, int c
, int lastblock
,
630 long_ad laarr
[EXTENT_MERGE_SIZE
], int *endnum
)
632 int start
, length
= 0, currlength
= 0, i
;
634 if (*endnum
>= (c
+1) && !lastblock
)
637 if ((laarr
[c
+1].extLength
>> 30) == EXTENT_NOT_RECORDED_ALLOCATED
)
640 length
= currlength
= (((laarr
[c
+1].extLength
& UDF_EXTENT_LENGTH_MASK
) +
641 inode
->i_sb
->s_blocksize
- 1) >> inode
->i_sb
->s_blocksize_bits
);
646 for (i
=start
+1; i
<=*endnum
; i
++)
651 length
+= UDF_DEFAULT_PREALLOC_BLOCKS
;
653 else if ((laarr
[i
].extLength
>> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED
)
654 length
+= (((laarr
[i
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
655 inode
->i_sb
->s_blocksize
- 1) >> inode
->i_sb
->s_blocksize_bits
);
662 int next
= laarr
[start
].extLocation
.logicalBlockNum
+
663 (((laarr
[start
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
664 inode
->i_sb
->s_blocksize
- 1) >> inode
->i_sb
->s_blocksize_bits
);
665 int numalloc
= udf_prealloc_blocks(inode
,
666 laarr
[start
].extLocation
.partitionReferenceNum
,
667 next
, (UDF_DEFAULT_PREALLOC_BLOCKS
> length
? length
:
668 UDF_DEFAULT_PREALLOC_BLOCKS
) - currlength
);
673 laarr
[start
].extLength
+=
674 (numalloc
<< inode
->i_sb
->s_blocksize_bits
);
677 memmove(&laarr
[c
+2], &laarr
[c
+1],
678 sizeof(long_ad
) * (*endnum
- (c
+1)));
680 laarr
[c
+1].extLocation
.logicalBlockNum
= next
;
681 laarr
[c
+1].extLocation
.partitionReferenceNum
=
682 laarr
[c
].extLocation
.partitionReferenceNum
;
683 laarr
[c
+1].extLength
= (EXTENT_NOT_RECORDED_ALLOCATED
<< 30) |
684 (numalloc
<< inode
->i_sb
->s_blocksize_bits
);
688 for (i
=start
+1; numalloc
&& i
<*endnum
; i
++)
690 int elen
= ((laarr
[i
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
691 inode
->i_sb
->s_blocksize
- 1) >> inode
->i_sb
->s_blocksize_bits
;
695 laarr
[c
+1].extLength
-=
696 (numalloc
<< inode
->i_sb
->s_blocksize_bits
);
703 memmove(&laarr
[i
], &laarr
[i
+1],
704 sizeof(long_ad
) * (*endnum
- (i
+1)));
713 static void udf_merge_extents(struct inode
*inode
,
714 long_ad laarr
[EXTENT_MERGE_SIZE
], int *endnum
)
718 for (i
=0; i
<(*endnum
-1); i
++)
720 if ((laarr
[i
].extLength
>> 30) == (laarr
[i
+1].extLength
>> 30))
722 if (((laarr
[i
].extLength
>> 30) == EXTENT_NOT_RECORDED_NOT_ALLOCATED
) ||
723 ((laarr
[i
+1].extLocation
.logicalBlockNum
- laarr
[i
].extLocation
.logicalBlockNum
) ==
724 (((laarr
[i
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
725 inode
->i_sb
->s_blocksize
- 1) >> inode
->i_sb
->s_blocksize_bits
)))
727 if (((laarr
[i
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
728 (laarr
[i
+1].extLength
& UDF_EXTENT_LENGTH_MASK
) +
729 inode
->i_sb
->s_blocksize
- 1) & ~UDF_EXTENT_LENGTH_MASK
)
731 laarr
[i
+1].extLength
= (laarr
[i
+1].extLength
-
732 (laarr
[i
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
733 UDF_EXTENT_LENGTH_MASK
) & ~(inode
->i_sb
->s_blocksize
-1);
734 laarr
[i
].extLength
= (UDF_EXTENT_LENGTH_MASK
+ 1) -
735 inode
->i_sb
->s_blocksize
;
736 laarr
[i
+1].extLocation
.logicalBlockNum
=
737 laarr
[i
].extLocation
.logicalBlockNum
+
738 ((laarr
[i
].extLength
& UDF_EXTENT_LENGTH_MASK
) >>
739 inode
->i_sb
->s_blocksize_bits
);
743 laarr
[i
].extLength
= laarr
[i
+1].extLength
+
744 (((laarr
[i
].extLength
& UDF_EXTENT_LENGTH_MASK
) +
745 inode
->i_sb
->s_blocksize
- 1) & ~(inode
->i_sb
->s_blocksize
-1));
747 memmove(&laarr
[i
+1], &laarr
[i
+2],
748 sizeof(long_ad
) * (*endnum
- (i
+2)));
757 static void udf_update_extents(struct inode
*inode
,
758 long_ad laarr
[EXTENT_MERGE_SIZE
], int startnum
, int endnum
,
759 lb_addr pbloc
, Uint32 pextoffset
, struct buffer_head
**pbh
)
765 if (startnum
> endnum
)
767 for (i
=0; i
<(startnum
-endnum
); i
++)
769 udf_delete_aext(inode
, pbloc
, pextoffset
, laarr
[i
].extLocation
,
770 laarr
[i
].extLength
, *pbh
);
773 else if (startnum
< endnum
)
775 for (i
=0; i
<(endnum
-startnum
); i
++)
777 udf_insert_aext(inode
, pbloc
, pextoffset
, laarr
[i
].extLocation
,
778 laarr
[i
].extLength
, *pbh
);
779 udf_next_aext(inode
, &pbloc
, &pextoffset
, &laarr
[i
].extLocation
,
780 &laarr
[i
].extLength
, pbh
, 1);
785 for (i
=start
; i
<endnum
; i
++)
787 udf_next_aext(inode
, &pbloc
, &pextoffset
, &tmploc
, &tmplen
, pbh
, 0);
788 udf_write_aext(inode
, pbloc
, &pextoffset
, laarr
[i
].extLocation
,
789 laarr
[i
].extLength
, pbh
, 1);
793 struct buffer_head
* udf_bread(struct inode
* inode
, int block
,
794 int create
, int * err
)
796 struct buffer_head
* bh
= NULL
;
799 prev_blocks
= inode
->i_blocks
;
801 bh
= udf_getblk(inode
, block
, create
, err
);
807 S_ISDIR(inode
->i_mode
) &&
808 inode
->i_blocks
> prev_blocks
)
811 struct buffer_head
*tmp_bh
= NULL
;
814 i
< UDF_DEFAULT_PREALLOC_DIR_BLOCKS
;
817 tmp_bh
= udf_getblk(inode
, block
+i
, create
, err
);
820 udf_release_data(bh
);
823 udf_release_data(tmp_bh
);
828 if (buffer_uptodate(bh
))
830 ll_rw_block(READ
, 1, &bh
);
832 if (buffer_uptodate(bh
))
846 * This routine is called by iget() [which is called by udf_iget()]
847 * (clean_inode() will have been called first)
848 * when an inode is first read into memory.
851 * July 1, 1997 - Andrew E. Mileski
852 * Written, tested, and released.
854 * 12/19/98 dgb Updated to fix size problems.
858 udf_read_inode(struct inode
*inode
)
860 memset(&UDF_I_LOCATION(inode
), 0xFF, sizeof(lb_addr
));
864 __udf_read_inode(struct inode
*inode
)
866 struct buffer_head
*bh
= NULL
;
867 struct FileEntry
*fe
;
871 * Set defaults, but the inode is still incomplete!
872 * Note: get_new_inode() sets the following on a new inode:
876 * i_flags = sb->s_flags
878 * clean_inode(): zero fills and sets
884 inode
->i_blksize
= PAGE_SIZE
;
886 bh
= udf_read_ptagged(inode
->i_sb
, UDF_I_LOCATION(inode
), 0, &ident
);
890 printk(KERN_ERR
"udf: udf_read_inode(ino %ld) failed !bh\n",
892 make_bad_inode(inode
);
896 if (ident
!= TID_FILE_ENTRY
&& ident
!= TID_EXTENDED_FILE_ENTRY
)
898 printk(KERN_ERR
"udf: udf_read_inode(ino %ld) failed ident=%d\n",
899 inode
->i_ino
, ident
);
900 udf_release_data(bh
);
901 make_bad_inode(inode
);
905 fe
= (struct FileEntry
*)bh
->b_data
;
907 if (le16_to_cpu(fe
->icbTag
.strategyType
) == 4096)
909 struct buffer_head
*ibh
= NULL
, *nbh
= NULL
;
910 struct IndirectEntry
*ie
;
912 ibh
= udf_read_ptagged(inode
->i_sb
, UDF_I_LOCATION(inode
), 1, &ident
);
913 if (ident
== TID_INDIRECT_ENTRY
)
918 ie
= (struct IndirectEntry
*)ibh
->b_data
;
920 loc
= lelb_to_cpu(ie
->indirectICB
.extLocation
);
922 if (ie
->indirectICB
.extLength
&&
923 (nbh
= udf_read_ptagged(inode
->i_sb
, loc
, 0, &ident
)))
925 if (ident
== TID_FILE_ENTRY
||
926 ident
== TID_EXTENDED_FILE_ENTRY
)
928 memcpy(&UDF_I_LOCATION(inode
), &loc
, sizeof(lb_addr
));
929 udf_release_data(bh
);
930 udf_release_data(ibh
);
931 udf_release_data(nbh
);
932 __udf_read_inode(inode
);
937 udf_release_data(nbh
);
938 udf_release_data(ibh
);
942 udf_release_data(ibh
);
946 udf_release_data(ibh
);
948 else if (le16_to_cpu(fe
->icbTag
.strategyType
) != 4)
950 printk(KERN_ERR
"udf: unsupported strategy type: %d\n",
951 le16_to_cpu(fe
->icbTag
.strategyType
));
952 udf_release_data(bh
);
953 make_bad_inode(inode
);
956 udf_fill_inode(inode
, bh
);
957 udf_release_data(bh
);
960 static void udf_fill_inode(struct inode
*inode
, struct buffer_head
*bh
)
962 struct FileEntry
*fe
;
963 struct ExtendedFileEntry
*efe
;
968 inode
->i_version
= ++event
;
969 UDF_I_NEW_INODE(inode
) = 0;
971 fe
= (struct FileEntry
*)bh
->b_data
;
972 efe
= (struct ExtendedFileEntry
*)bh
->b_data
;
974 if (fe
->descTag
.tagIdent
== TID_EXTENDED_FILE_ENTRY
)
975 UDF_I_EXTENDED_FE(inode
) = 1;
976 else /* fe->descTag.tagIdent == TID_FILE_ENTRY */
977 UDF_I_EXTENDED_FE(inode
) = 0;
979 if (le16_to_cpu(fe
->icbTag
.strategyType
) == 4)
980 UDF_I_STRAT4096(inode
) = 0;
981 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
982 UDF_I_STRAT4096(inode
) = 1;
984 inode
->i_uid
= le32_to_cpu(fe
->uid
);
985 if ( inode
->i_uid
== -1 ) inode
->i_uid
= UDF_SB(inode
->i_sb
)->s_uid
;
987 inode
->i_gid
= le32_to_cpu(fe
->gid
);
988 if ( inode
->i_gid
== -1 ) inode
->i_gid
= UDF_SB(inode
->i_sb
)->s_gid
;
990 inode
->i_nlink
= le16_to_cpu(fe
->fileLinkCount
);
994 inode
->i_size
= le64_to_cpu(fe
->informationLength
);
996 inode
->i_mode
= udf_convert_permissions(fe
);
997 inode
->i_mode
&= ~UDF_SB(inode
->i_sb
)->s_umask
;
999 UDF_I_NEXT_ALLOC_BLOCK(inode
) = 0;
1000 UDF_I_NEXT_ALLOC_GOAL(inode
) = 0;
1002 UDF_I_ALLOCTYPE(inode
) = le16_to_cpu(fe
->icbTag
.flags
) & ICB_FLAG_ALLOC_MASK
;
1004 if (UDF_I_EXTENDED_FE(inode
) == 0)
1006 inode
->i_blocks
= le64_to_cpu(fe
->logicalBlocksRecorded
) <<
1007 (inode
->i_sb
->s_blocksize_bits
- 9);
1009 if ( udf_stamp_to_time(&convtime
, &convtime_usec
,
1010 lets_to_cpu(fe
->modificationTime
)) )
1012 inode
->i_mtime
= convtime
;
1013 UDF_I_UMTIME(inode
) = convtime_usec
;
1014 inode
->i_ctime
= convtime
;
1015 UDF_I_UCTIME(inode
) = convtime_usec
;
1019 inode
->i_mtime
= UDF_SB_RECORDTIME(inode
->i_sb
);
1020 UDF_I_UMTIME(inode
) = 0;
1021 inode
->i_ctime
= UDF_SB_RECORDTIME(inode
->i_sb
);
1022 UDF_I_UCTIME(inode
) = 0;
1025 if ( udf_stamp_to_time(&convtime
, &convtime_usec
,
1026 lets_to_cpu(fe
->accessTime
)) )
1028 inode
->i_atime
= convtime
;
1029 UDF_I_UATIME(inode
) = convtime_usec
;
1033 inode
->i_atime
= UDF_SB_RECORDTIME(inode
->i_sb
);
1034 UDF_I_UATIME(inode
) = convtime_usec
;
1037 UDF_I_UNIQUE(inode
) = le64_to_cpu(fe
->uniqueID
);
1038 UDF_I_LENEATTR(inode
) = le32_to_cpu(fe
->lengthExtendedAttr
);
1039 UDF_I_LENALLOC(inode
) = le32_to_cpu(fe
->lengthAllocDescs
);
1040 offset
= sizeof(struct FileEntry
) + UDF_I_LENEATTR(inode
);
1041 alen
= offset
+ UDF_I_LENALLOC(inode
);
1045 inode
->i_blocks
= le64_to_cpu(efe
->logicalBlocksRecorded
) <<
1046 (inode
->i_sb
->s_blocksize_bits
- 9);
1048 if ( udf_stamp_to_time(&convtime
, &convtime_usec
,
1049 lets_to_cpu(efe
->modificationTime
)) )
1051 inode
->i_mtime
= convtime
;
1052 UDF_I_UMTIME(inode
) = convtime_usec
;
1056 inode
->i_mtime
= UDF_SB_RECORDTIME(inode
->i_sb
);
1057 UDF_I_UMTIME(inode
) = 0;
1060 if ( udf_stamp_to_time(&convtime
, &convtime_usec
,
1061 lets_to_cpu(efe
->accessTime
)) )
1063 inode
->i_atime
= convtime
;
1064 UDF_I_UATIME(inode
) = convtime_usec
;
1068 inode
->i_atime
= UDF_SB_RECORDTIME(inode
->i_sb
);
1069 UDF_I_UATIME(inode
) = 0;
1072 if ( udf_stamp_to_time(&convtime
, &convtime_usec
,
1073 lets_to_cpu(efe
->createTime
)) )
1075 inode
->i_ctime
= convtime
;
1076 UDF_I_UCTIME(inode
) = convtime_usec
;
1080 inode
->i_ctime
= UDF_SB_RECORDTIME(inode
->i_sb
);
1081 UDF_I_UCTIME(inode
) = 0;
1084 UDF_I_UNIQUE(inode
) = le64_to_cpu(efe
->uniqueID
);
1085 UDF_I_LENEATTR(inode
) = le32_to_cpu(efe
->lengthExtendedAttr
);
1086 UDF_I_LENALLOC(inode
) = le32_to_cpu(efe
->lengthAllocDescs
);
1087 offset
= sizeof(struct ExtendedFileEntry
) + UDF_I_LENEATTR(inode
);
1088 alen
= offset
+ UDF_I_LENALLOC(inode
);
1091 switch (fe
->icbTag
.fileType
)
1093 case FILE_TYPE_DIRECTORY
:
1095 inode
->i_op
= &udf_dir_inode_operations
;
1096 inode
->i_fop
= &udf_dir_operations
;
1097 inode
->i_mode
|= S_IFDIR
;
1101 case FILE_TYPE_REGULAR
:
1102 case FILE_TYPE_NONE
:
1104 if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_IN_ICB
)
1105 inode
->i_data
.a_ops
= &udf_adinicb_aops
;
1107 inode
->i_data
.a_ops
= &udf_aops
;
1108 inode
->i_op
= &udf_file_inode_operations
;
1109 inode
->i_fop
= &udf_file_operations
;
1110 inode
->i_mode
|= S_IFREG
;
1113 case FILE_TYPE_BLOCK
:
1115 inode
->i_mode
|= S_IFBLK
;
1118 case FILE_TYPE_CHAR
:
1120 inode
->i_mode
|= S_IFCHR
;
1123 case FILE_TYPE_FIFO
:
1125 init_special_inode(inode
, inode
->i_mode
| S_IFIFO
, 0);
1128 case FILE_TYPE_SYMLINK
:
1130 inode
->i_data
.a_ops
= &udf_symlink_aops
;
1131 inode
->i_op
= &page_symlink_inode_operations
;
1132 inode
->i_mode
= S_IFLNK
|S_IRWXUGO
;
1137 printk(KERN_ERR
"udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1138 inode
->i_ino
, fe
->icbTag
.fileType
);
1139 make_bad_inode(inode
);
1143 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
))
1145 struct buffer_head
*tbh
= NULL
;
1146 struct DeviceSpecificationExtendedAttr
*dsea
=
1147 (struct DeviceSpecificationExtendedAttr
*)
1148 udf_get_extendedattr(inode
, 12, 1, &tbh
);
1152 init_special_inode(inode
, inode
->i_mode
,
1153 ((le32_to_cpu(dsea
->majorDeviceIdent
)) << 8) |
1154 (le32_to_cpu(dsea
->minorDeviceIdent
) & 0xFF));
1155 /* Developer ID ??? */
1156 udf_release_data(tbh
);
1160 make_bad_inode(inode
);
1166 udf_convert_permissions(struct FileEntry
*fe
)
1172 permissions
= le32_to_cpu(fe
->permissions
);
1173 flags
= le16_to_cpu(fe
->icbTag
.flags
);
1175 mode
= (( permissions
) & S_IRWXO
) |
1176 (( permissions
>> 2 ) & S_IRWXG
) |
1177 (( permissions
>> 4 ) & S_IRWXU
) |
1178 (( flags
& ICB_FLAG_SETUID
) ? S_ISUID
: 0) |
1179 (( flags
& ICB_FLAG_SETGID
) ? S_ISGID
: 0) |
1180 (( flags
& ICB_FLAG_STICKY
) ? S_ISVTX
: 0);
1189 * Write out the specified inode.
1192 * This routine is called whenever an inode is synced.
1193 * Currently this routine is just a placeholder.
1196 * July 1, 1997 - Andrew E. Mileski
1197 * Written, tested, and released.
/*
 * udf_write_inode
 *
 * PURPOSE
 *	Write out the specified inode: called whenever an inode is
 *	synced.  Thin wrapper around udf_update_inode().
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 *
 * NOTE(review): interior lines were elided in this copy of the file;
 * contemporary 2.4 UDF takes the BKL around the update -- confirm
 * against the canonical tree before relying on this reconstruction.
 */
void udf_write_inode(struct inode * inode, int sync)
{
	udf_update_inode(inode, sync);
}
/* Synchronously write the inode's on-disc file entry; returns the
 * udf_update_inode() result. */
int udf_sync_inode(struct inode * inode)
{
	return udf_update_inode(inode, 1);
}
1213 udf_update_inode(struct inode
*inode
, int do_sync
)
1215 struct buffer_head
*bh
= NULL
;
1216 struct FileEntry
*fe
;
1217 struct ExtendedFileEntry
*efe
;
1225 bh
= udf_tread(inode
->i_sb
,
1226 udf_get_lb_pblock(inode
->i_sb
, UDF_I_LOCATION(inode
), 0),
1227 inode
->i_sb
->s_blocksize
);
1230 udf_debug("bread failure\n");
1233 fe
= (struct FileEntry
*)bh
->b_data
;
1234 efe
= (struct ExtendedFileEntry
*)bh
->b_data
;
1235 if (UDF_I_NEW_INODE(inode
) == 1)
1237 if (UDF_I_EXTENDED_FE(inode
) == 0)
1238 memset(bh
->b_data
, 0x0, sizeof(struct FileEntry
));
1240 memset(bh
->b_data
, 0x00, sizeof(struct ExtendedFileEntry
));
1241 memset(bh
->b_data
+ udf_file_entry_alloc_offset(inode
) +
1242 UDF_I_LENALLOC(inode
), 0x0, inode
->i_sb
->s_blocksize
-
1243 udf_file_entry_alloc_offset(inode
) - UDF_I_LENALLOC(inode
));
1244 UDF_I_NEW_INODE(inode
) = 0;
1247 if (inode
->i_uid
!= UDF_SB(inode
->i_sb
)->s_uid
)
1248 fe
->uid
= cpu_to_le32(inode
->i_uid
);
1250 if (inode
->i_gid
!= UDF_SB(inode
->i_sb
)->s_gid
)
1251 fe
->gid
= cpu_to_le32(inode
->i_gid
);
1253 udfperms
= ((inode
->i_mode
& S_IRWXO
) ) |
1254 ((inode
->i_mode
& S_IRWXG
) << 2) |
1255 ((inode
->i_mode
& S_IRWXU
) << 4);
1257 udfperms
|= (le32_to_cpu(fe
->permissions
) &
1258 (PERM_O_DELETE
| PERM_O_CHATTR
|
1259 PERM_G_DELETE
| PERM_G_CHATTR
|
1260 PERM_U_DELETE
| PERM_U_CHATTR
));
1261 fe
->permissions
= cpu_to_le32(udfperms
);
1263 if (S_ISDIR(inode
->i_mode
))
1264 fe
->fileLinkCount
= cpu_to_le16(inode
->i_nlink
- 1);
1266 fe
->fileLinkCount
= cpu_to_le16(inode
->i_nlink
);
1268 fe
->informationLength
= cpu_to_le64(inode
->i_size
);
1270 if (S_ISCHR(inode
->i_mode
) || S_ISBLK(inode
->i_mode
))
1273 struct buffer_head
*tbh
= NULL
;
1274 struct DeviceSpecificationExtendedAttr
*dsea
=
1275 (struct DeviceSpecificationExtendedAttr
*)
1276 udf_get_extendedattr(inode
, 12, 1, &tbh
);
1280 dsea
= (struct DeviceSpecificationExtendedAttr
*)
1281 udf_add_extendedattr(inode
,
1282 sizeof(struct DeviceSpecificationExtendedAttr
) +
1283 sizeof(EntityID
), 12, 0x3, &tbh
);
1284 dsea
->attrType
= 12;
1285 dsea
->attrSubtype
= 1;
1286 dsea
->attrLength
= sizeof(struct DeviceSpecificationExtendedAttr
) +
1288 dsea
->impUseLength
= sizeof(EntityID
);
1290 eid
= (EntityID
*)dsea
->impUse
;
1291 memset(eid
, 0, sizeof(EntityID
));
1292 strcpy(eid
->ident
, UDF_ID_DEVELOPER
);
1293 eid
->identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1294 eid
->identSuffix
[1] = UDF_OS_ID_LINUX
;
1295 dsea
->majorDeviceIdent
= kdev_t_to_nr(inode
->i_rdev
) >> 8;
1296 dsea
->minorDeviceIdent
= kdev_t_to_nr(inode
->i_rdev
) & 0xFF;
1297 mark_buffer_dirty(tbh
);
1298 udf_release_data(tbh
);
1301 if (UDF_I_EXTENDED_FE(inode
) == 0)
1303 fe
->logicalBlocksRecorded
= cpu_to_le64(
1304 (inode
->i_blocks
+ (1 << (inode
->i_sb
->s_blocksize_bits
- 9)) - 1) >>
1305 (inode
->i_sb
->s_blocksize_bits
- 9));
1307 if (udf_time_to_stamp(&cpu_time
, inode
->i_atime
, UDF_I_UATIME(inode
)))
1308 fe
->accessTime
= cpu_to_lets(cpu_time
);
1309 if (udf_time_to_stamp(&cpu_time
, inode
->i_mtime
, UDF_I_UMTIME(inode
)))
1310 fe
->modificationTime
= cpu_to_lets(cpu_time
);
1311 memset(&(fe
->impIdent
), 0, sizeof(EntityID
));
1312 strcpy(fe
->impIdent
.ident
, UDF_ID_DEVELOPER
);
1313 fe
->impIdent
.identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1314 fe
->impIdent
.identSuffix
[1] = UDF_OS_ID_LINUX
;
1315 fe
->uniqueID
= cpu_to_le64(UDF_I_UNIQUE(inode
));
1316 fe
->lengthExtendedAttr
= cpu_to_le32(UDF_I_LENEATTR(inode
));
1317 fe
->lengthAllocDescs
= cpu_to_le32(UDF_I_LENALLOC(inode
));
1318 fe
->descTag
.tagIdent
= le16_to_cpu(TID_FILE_ENTRY
);
1319 crclen
= sizeof(struct FileEntry
);
1323 efe
->logicalBlocksRecorded
= cpu_to_le64(
1324 (inode
->i_blocks
+ (2 << (inode
->i_sb
->s_blocksize_bits
- 9)) - 1) >>
1325 (inode
->i_sb
->s_blocksize_bits
- 9));
1327 if (udf_time_to_stamp(&cpu_time
, inode
->i_atime
, UDF_I_UATIME(inode
)))
1328 efe
->accessTime
= cpu_to_lets(cpu_time
);
1329 if (udf_time_to_stamp(&cpu_time
, inode
->i_mtime
, UDF_I_UMTIME(inode
)))
1330 efe
->modificationTime
= cpu_to_lets(cpu_time
);
1331 if (udf_time_to_stamp(&cpu_time
, inode
->i_ctime
, UDF_I_UCTIME(inode
)))
1332 efe
->createTime
= cpu_to_lets(cpu_time
);
1333 memset(&(efe
->impIdent
), 0, sizeof(EntityID
));
1334 strcpy(efe
->impIdent
.ident
, UDF_ID_DEVELOPER
);
1335 efe
->impIdent
.identSuffix
[0] = UDF_OS_CLASS_UNIX
;
1336 efe
->impIdent
.identSuffix
[1] = UDF_OS_ID_LINUX
;
1337 efe
->uniqueID
= cpu_to_le64(UDF_I_UNIQUE(inode
));
1338 efe
->lengthExtendedAttr
= cpu_to_le32(UDF_I_LENEATTR(inode
));
1339 efe
->lengthAllocDescs
= cpu_to_le32(UDF_I_LENALLOC(inode
));
1340 efe
->descTag
.tagIdent
= le16_to_cpu(TID_EXTENDED_FILE_ENTRY
);
1341 crclen
= sizeof(struct ExtendedFileEntry
);
1343 if (UDF_I_STRAT4096(inode
))
1345 fe
->icbTag
.strategyType
= cpu_to_le16(4096);
1346 fe
->icbTag
.strategyParameter
= cpu_to_le16(1);
1347 fe
->icbTag
.numEntries
= cpu_to_le16(2);
1351 fe
->icbTag
.strategyType
= cpu_to_le16(4);
1352 fe
->icbTag
.numEntries
= cpu_to_le16(1);
1355 if (S_ISDIR(inode
->i_mode
))
1356 fe
->icbTag
.fileType
= FILE_TYPE_DIRECTORY
;
1357 else if (S_ISREG(inode
->i_mode
))
1358 fe
->icbTag
.fileType
= FILE_TYPE_REGULAR
;
1359 else if (S_ISLNK(inode
->i_mode
))
1360 fe
->icbTag
.fileType
= FILE_TYPE_SYMLINK
;
1361 else if (S_ISBLK(inode
->i_mode
))
1362 fe
->icbTag
.fileType
= FILE_TYPE_BLOCK
;
1363 else if (S_ISCHR(inode
->i_mode
))
1364 fe
->icbTag
.fileType
= FILE_TYPE_CHAR
;
1365 else if (S_ISFIFO(inode
->i_mode
))
1366 fe
->icbTag
.fileType
= FILE_TYPE_FIFO
;
1368 icbflags
= UDF_I_ALLOCTYPE(inode
) |
1369 ((inode
->i_mode
& S_ISUID
) ? ICB_FLAG_SETUID
: 0) |
1370 ((inode
->i_mode
& S_ISGID
) ? ICB_FLAG_SETGID
: 0) |
1371 ((inode
->i_mode
& S_ISVTX
) ? ICB_FLAG_STICKY
: 0) |
1372 (le16_to_cpu(fe
->icbTag
.flags
) &
1373 ~(ICB_FLAG_ALLOC_MASK
| ICB_FLAG_SETUID
|
1374 ICB_FLAG_SETGID
| ICB_FLAG_STICKY
));
1376 fe
->icbTag
.flags
= cpu_to_le16(icbflags
);
1377 fe
->descTag
.descVersion
= cpu_to_le16(2);
1378 fe
->descTag
.tagSerialNum
= cpu_to_le16(UDF_SB_SERIALNUM(inode
->i_sb
));
1379 fe
->descTag
.tagLocation
= cpu_to_le32(UDF_I_LOCATION(inode
).logicalBlockNum
);
1380 crclen
+= UDF_I_LENEATTR(inode
) + UDF_I_LENALLOC(inode
) - sizeof(tag
);
1381 fe
->descTag
.descCRCLength
= cpu_to_le16(crclen
);
1382 fe
->descTag
.descCRC
= cpu_to_le16(udf_crc((char *)fe
+ sizeof(tag
), crclen
, 0));
1384 fe
->descTag
.tagChecksum
= 0;
1385 for (i
=0; i
<16; i
++)
1387 fe
->descTag
.tagChecksum
+= ((Uint8
*)&(fe
->descTag
))[i
];
1389 /* write the data blocks */
1390 mark_buffer_dirty(bh
);
1393 ll_rw_block(WRITE
, 1, &bh
);
1395 if (buffer_req(bh
) && !buffer_uptodate(bh
))
1397 printk("IO error syncing udf inode [%s:%08lx]\n",
1398 bdevname(inode
->i_dev
), inode
->i_ino
);
1402 udf_release_data(bh
);
1413 * This routine replaces iget() and read_inode().
1416 * October 3, 1997 - Andrew E. Mileski
1417 * Written, tested, and released.
1419 * 12/19/98 dgb Added semaphore and changed to be a wrapper of iget
1422 udf_iget(struct super_block
*sb
, lb_addr ino
)
1424 struct inode
*inode
;
1425 unsigned long block
;
1427 block
= udf_get_lb_pblock(sb
, ino
, 0);
1431 inode
= iget(sb
, block
);
1432 /* calls udf_read_inode() ! */
1436 printk(KERN_ERR
"udf: iget() failed\n");
1439 else if (is_bad_inode(inode
))
1444 else if (UDF_I_LOCATION(inode
).logicalBlockNum
== 0xFFFFFFFF &&
1445 UDF_I_LOCATION(inode
).partitionReferenceNum
== 0xFFFF)
1447 memcpy(&UDF_I_LOCATION(inode
), &ino
, sizeof(lb_addr
));
1448 __udf_read_inode(inode
);
1451 if ( ino
.logicalBlockNum
>= UDF_SB_PARTLEN(sb
, ino
.partitionReferenceNum
) )
1453 udf_debug("block=%d, partition=%d out of range\n",
1454 ino
.logicalBlockNum
, ino
.partitionReferenceNum
);
1461 int udf_add_aext(struct inode
*inode
, lb_addr
*bloc
, int *extoffset
,
1462 lb_addr eloc
, Uint32 elen
, struct buffer_head
**bh
, int inc
)
1465 short_ad
*sad
= NULL
;
1466 long_ad
*lad
= NULL
;
1467 struct AllocExtDesc
*aed
;
1472 if (!(*bh
= udf_tread(inode
->i_sb
,
1473 udf_get_lb_pblock(inode
->i_sb
, *bloc
, 0),
1474 inode
->i_sb
->s_blocksize
)))
1476 udf_debug("reading block %d failed!\n",
1477 udf_get_lb_pblock(inode
->i_sb
, *bloc
, 0));
1482 if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_SHORT
)
1483 adsize
= sizeof(short_ad
);
1484 else if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_LONG
)
1485 adsize
= sizeof(long_ad
);
1489 if (*extoffset
+ (2 * adsize
) > inode
->i_sb
->s_blocksize
)
1492 struct buffer_head
*nbh
;
1494 lb_addr obloc
= *bloc
;
1496 if (!(bloc
->logicalBlockNum
= udf_new_block(inode
,
1497 obloc
.partitionReferenceNum
, obloc
.logicalBlockNum
, &err
)))
1501 if (!(nbh
= udf_tread(inode
->i_sb
, udf_get_lb_pblock(inode
->i_sb
,
1502 *bloc
, 0), inode
->i_sb
->s_blocksize
)))
1506 aed
= (struct AllocExtDesc
*)(nbh
->b_data
);
1507 aed
->previousAllocExtLocation
= cpu_to_le32(obloc
.logicalBlockNum
);
1508 if (*extoffset
+ adsize
> inode
->i_sb
->s_blocksize
)
1510 loffset
= *extoffset
;
1511 aed
->lengthAllocDescs
= cpu_to_le32(adsize
);
1512 sptr
= (*bh
)->b_data
+ *extoffset
- adsize
;
1513 dptr
= nbh
->b_data
+ sizeof(struct AllocExtDesc
);
1514 memcpy(dptr
, sptr
, adsize
);
1515 *extoffset
= sizeof(struct AllocExtDesc
) + adsize
;
1519 loffset
= *extoffset
+ adsize
;
1520 aed
->lengthAllocDescs
= cpu_to_le32(0);
1521 sptr
= (*bh
)->b_data
+ *extoffset
;
1522 *extoffset
= sizeof(struct AllocExtDesc
);
1524 if (memcmp(&UDF_I_LOCATION(inode
), &obloc
, sizeof(lb_addr
)))
1526 aed
= (struct AllocExtDesc
*)(*bh
)->b_data
;
1527 aed
->lengthAllocDescs
=
1528 cpu_to_le32(le32_to_cpu(aed
->lengthAllocDescs
) + adsize
);
1532 UDF_I_LENALLOC(inode
) += adsize
;
1533 mark_inode_dirty(inode
);
1536 udf_new_tag(nbh
->b_data
, TID_ALLOC_EXTENT_DESC
, 2, 1,
1537 bloc
->logicalBlockNum
, sizeof(tag
));
1538 switch (UDF_I_ALLOCTYPE(inode
))
1540 case ICB_FLAG_AD_SHORT
:
1542 sad
= (short_ad
*)sptr
;
1543 sad
->extLength
= cpu_to_le32(
1544 EXTENT_NEXT_EXTENT_ALLOCDECS
<< 30 |
1545 inode
->i_sb
->s_blocksize
);
1546 sad
->extPosition
= cpu_to_le32(bloc
->logicalBlockNum
);
1549 case ICB_FLAG_AD_LONG
:
1551 lad
= (long_ad
*)sptr
;
1552 lad
->extLength
= cpu_to_le32(
1553 EXTENT_NEXT_EXTENT_ALLOCDECS
<< 30 |
1554 inode
->i_sb
->s_blocksize
);
1555 lad
->extLocation
= cpu_to_lelb(*bloc
);
1559 udf_update_tag((*bh
)->b_data
, loffset
);
1560 mark_buffer_dirty(*bh
);
1561 udf_release_data(*bh
);
1565 ret
= udf_write_aext(inode
, *bloc
, extoffset
, eloc
, elen
, bh
, inc
);
1567 if (!memcmp(&UDF_I_LOCATION(inode
), bloc
, sizeof(lb_addr
)))
1569 UDF_I_LENALLOC(inode
) += adsize
;
1570 mark_inode_dirty(inode
);
1574 aed
= (struct AllocExtDesc
*)(*bh
)->b_data
;
1575 aed
->lengthAllocDescs
=
1576 cpu_to_le32(le32_to_cpu(aed
->lengthAllocDescs
) + adsize
);
1577 udf_update_tag((*bh
)->b_data
, *extoffset
+ (inc
? 0 : adsize
));
1578 mark_buffer_dirty(*bh
);
1584 int udf_write_aext(struct inode
*inode
, lb_addr bloc
, int *extoffset
,
1585 lb_addr eloc
, Uint32 elen
, struct buffer_head
**bh
, int inc
)
1588 short_ad
*sad
= NULL
;
1589 long_ad
*lad
= NULL
;
1593 if (!(*bh
= udf_tread(inode
->i_sb
,
1594 udf_get_lb_pblock(inode
->i_sb
, bloc
, 0),
1595 inode
->i_sb
->s_blocksize
)))
1597 udf_debug("reading block %d failed!\n",
1598 udf_get_lb_pblock(inode
->i_sb
, bloc
, 0));
1603 if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_SHORT
)
1604 adsize
= sizeof(short_ad
);
1605 else if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_LONG
)
1606 adsize
= sizeof(long_ad
);
1610 switch (UDF_I_ALLOCTYPE(inode
))
1612 case ICB_FLAG_AD_SHORT
:
1614 sad
= (short_ad
*)((*bh
)->b_data
+ *extoffset
);
1615 sad
->extLength
= cpu_to_le32(elen
);
1616 sad
->extPosition
= cpu_to_le32(eloc
.logicalBlockNum
);
1619 case ICB_FLAG_AD_LONG
:
1621 lad
= (long_ad
*)((*bh
)->b_data
+ *extoffset
);
1622 lad
->extLength
= cpu_to_le32(elen
);
1623 lad
->extLocation
= cpu_to_lelb(eloc
);
1628 if (memcmp(&UDF_I_LOCATION(inode
), &bloc
, sizeof(lb_addr
)))
1630 struct AllocExtDesc
*aed
= (struct AllocExtDesc
*)(*bh
)->b_data
;
1631 udf_update_tag((*bh
)->b_data
,
1632 le32_to_cpu(aed
->lengthAllocDescs
) + sizeof(struct AllocExtDesc
));
1635 mark_inode_dirty(inode
);
1637 mark_buffer_dirty(*bh
);
1640 *extoffset
+= adsize
;
1641 return (elen
>> 30);
1644 int udf_next_aext(struct inode
*inode
, lb_addr
*bloc
, int *extoffset
,
1645 lb_addr
*eloc
, Uint32
*elen
, struct buffer_head
**bh
, int inc
)
1652 if (!(*bh
= udf_tread(inode
->i_sb
,
1653 udf_get_lb_pblock(inode
->i_sb
, *bloc
, 0),
1654 inode
->i_sb
->s_blocksize
)))
1656 udf_debug("reading block %d failed!\n",
1657 udf_get_lb_pblock(inode
->i_sb
, *bloc
, 0));
1662 if (!memcmp(&UDF_I_LOCATION(inode
), bloc
, sizeof(lb_addr
)))
1664 pos
= udf_file_entry_alloc_offset(inode
);
1665 alen
= UDF_I_LENALLOC(inode
) + pos
;
1669 struct AllocExtDesc
*aed
= (struct AllocExtDesc
*)(*bh
)->b_data
;
1671 pos
= sizeof(struct AllocExtDesc
);
1672 alen
= le32_to_cpu(aed
->lengthAllocDescs
) + pos
;
1678 switch (UDF_I_ALLOCTYPE(inode
))
1680 case ICB_FLAG_AD_SHORT
:
1684 if (!(sad
= udf_get_fileshortad((*bh
)->b_data
, alen
, extoffset
, inc
)))
1687 if ((etype
= le32_to_cpu(sad
->extLength
) >> 30) == EXTENT_NEXT_EXTENT_ALLOCDECS
)
1689 bloc
->logicalBlockNum
= le32_to_cpu(sad
->extPosition
);
1691 udf_release_data(*bh
);
1693 return udf_next_aext(inode
, bloc
, extoffset
, eloc
, elen
, bh
, inc
);
1697 eloc
->logicalBlockNum
= le32_to_cpu(sad
->extPosition
);
1698 eloc
->partitionReferenceNum
= UDF_I_LOCATION(inode
).partitionReferenceNum
;
1699 *elen
= le32_to_cpu(sad
->extLength
) & UDF_EXTENT_LENGTH_MASK
;
1703 case ICB_FLAG_AD_LONG
:
1707 if (!(lad
= udf_get_filelongad((*bh
)->b_data
, alen
, extoffset
, inc
)))
1710 if ((etype
= le32_to_cpu(lad
->extLength
) >> 30) == EXTENT_NEXT_EXTENT_ALLOCDECS
)
1712 *bloc
= lelb_to_cpu(lad
->extLocation
);
1714 udf_release_data(*bh
);
1716 return udf_next_aext(inode
, bloc
, extoffset
, eloc
, elen
, bh
, inc
);
1720 *eloc
= lelb_to_cpu(lad
->extLocation
);
1721 *elen
= le32_to_cpu(lad
->extLength
) & UDF_EXTENT_LENGTH_MASK
;
1725 case ICB_FLAG_AD_IN_ICB
:
1727 if (UDF_I_LENALLOC(inode
) == 0)
1729 etype
= EXTENT_RECORDED_ALLOCATED
;
1730 *eloc
= UDF_I_LOCATION(inode
);
1731 *elen
= UDF_I_LENALLOC(inode
);
1736 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode
));
1743 udf_debug("Empty Extent, inode=%ld, alloctype=%d, eloc=%d, elen=%d, etype=%d, extoffset=%d\n",
1744 inode
->i_ino
, UDF_I_ALLOCTYPE(inode
), eloc
->logicalBlockNum
, *elen
, etype
, *extoffset
);
1745 if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_SHORT
)
1746 *extoffset
-= sizeof(short_ad
);
1747 else if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_LONG
)
1748 *extoffset
-= sizeof(long_ad
);
1752 int udf_current_aext(struct inode
*inode
, lb_addr
*bloc
, int *extoffset
,
1753 lb_addr
*eloc
, Uint32
*elen
, struct buffer_head
**bh
, int inc
)
1760 if (!(*bh
= udf_tread(inode
->i_sb
,
1761 udf_get_lb_pblock(inode
->i_sb
, *bloc
, 0),
1762 inode
->i_sb
->s_blocksize
)))
1764 udf_debug("reading block %d failed!\n",
1765 udf_get_lb_pblock(inode
->i_sb
, *bloc
, 0));
1770 if (!memcmp(&UDF_I_LOCATION(inode
), bloc
, sizeof(lb_addr
)))
1772 if (!(UDF_I_EXTENDED_FE(inode
)))
1773 pos
= sizeof(struct FileEntry
) + UDF_I_LENEATTR(inode
);
1775 pos
= sizeof(struct ExtendedFileEntry
) + UDF_I_LENEATTR(inode
);
1776 alen
= UDF_I_LENALLOC(inode
) + pos
;
1780 struct AllocExtDesc
*aed
= (struct AllocExtDesc
*)(*bh
)->b_data
;
1782 pos
= sizeof(struct AllocExtDesc
);
1783 alen
= le32_to_cpu(aed
->lengthAllocDescs
) + pos
;
1789 switch (UDF_I_ALLOCTYPE(inode
))
1791 case ICB_FLAG_AD_SHORT
:
1795 if (!(sad
= udf_get_fileshortad((*bh
)->b_data
, alen
, extoffset
, inc
)))
1798 etype
= le32_to_cpu(sad
->extLength
) >> 30;
1799 eloc
->logicalBlockNum
= le32_to_cpu(sad
->extPosition
);
1800 eloc
->partitionReferenceNum
= UDF_I_LOCATION(inode
).partitionReferenceNum
;
1801 *elen
= le32_to_cpu(sad
->extLength
) & UDF_EXTENT_LENGTH_MASK
;
1804 case ICB_FLAG_AD_LONG
:
1808 if (!(lad
= udf_get_filelongad((*bh
)->b_data
, alen
, extoffset
, inc
)))
1811 etype
= le32_to_cpu(lad
->extLength
) >> 30;
1812 *eloc
= lelb_to_cpu(lad
->extLocation
);
1813 *elen
= le32_to_cpu(lad
->extLength
) & UDF_EXTENT_LENGTH_MASK
;
1818 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode
));
1825 udf_debug("Empty Extent!\n");
1826 if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_SHORT
)
1827 *extoffset
-= sizeof(short_ad
);
1828 else if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_LONG
)
1829 *extoffset
-= sizeof(long_ad
);
1833 int udf_insert_aext(struct inode
*inode
, lb_addr bloc
, int extoffset
,
1834 lb_addr neloc
, Uint32 nelen
, struct buffer_head
*bh
)
1842 if (!(bh
= udf_tread(inode
->i_sb
,
1843 udf_get_lb_pblock(inode
->i_sb
, bloc
, 0),
1844 inode
->i_sb
->s_blocksize
)))
1846 udf_debug("reading block %d failed!\n",
1847 udf_get_lb_pblock(inode
->i_sb
, bloc
, 0));
1852 atomic_inc(&bh
->b_count
);
1854 while ((type
= udf_next_aext(inode
, &bloc
, &extoffset
, &oeloc
, &oelen
, &bh
, 0)) != -1)
1856 udf_write_aext(inode
, bloc
, &extoffset
, neloc
, nelen
, &bh
, 1);
1859 nelen
= (type
<< 30) | oelen
;
1861 udf_add_aext(inode
, &bloc
, &extoffset
, neloc
, nelen
, &bh
, 1);
1862 udf_release_data(bh
);
1863 return (nelen
>> 30);
1866 int udf_delete_aext(struct inode
*inode
, lb_addr nbloc
, int nextoffset
,
1867 lb_addr eloc
, Uint32 elen
, struct buffer_head
*nbh
)
1869 struct buffer_head
*obh
;
1871 int oextoffset
, adsize
;
1873 struct AllocExtDesc
*aed
;
1877 if (!(nbh
= udf_tread(inode
->i_sb
,
1878 udf_get_lb_pblock(inode
->i_sb
, nbloc
, 0),
1879 inode
->i_sb
->s_blocksize
)))
1881 udf_debug("reading block %d failed!\n",
1882 udf_get_lb_pblock(inode
->i_sb
, nbloc
, 0));
1887 atomic_inc(&nbh
->b_count
);
1888 atomic_inc(&nbh
->b_count
);
1890 if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_SHORT
)
1891 adsize
= sizeof(short_ad
);
1892 else if (UDF_I_ALLOCTYPE(inode
) == ICB_FLAG_AD_LONG
)
1893 adsize
= sizeof(long_ad
);
1899 oextoffset
= nextoffset
;
1901 if (udf_next_aext(inode
, &nbloc
, &nextoffset
, &eloc
, &elen
, &nbh
, 1) == -1)
1904 while ((type
= udf_next_aext(inode
, &nbloc
, &nextoffset
, &eloc
, &elen
, &nbh
, 1)) != -1)
1906 udf_write_aext(inode
, obloc
, &oextoffset
, eloc
, (type
<< 30) | elen
, &obh
, 1);
1907 if (memcmp(&nbloc
, &obloc
, sizeof(lb_addr
)))
1910 udf_release_data(obh
);
1911 atomic_inc(&nbh
->b_count
);
1913 oextoffset
= nextoffset
- adsize
;
1916 memset(&eloc
, 0x00, sizeof(lb_addr
));
1919 if (memcmp(&nbloc
, &obloc
, sizeof(lb_addr
)))
1921 udf_free_blocks(inode
, nbloc
, 0, 1);
1922 udf_write_aext(inode
, obloc
, &oextoffset
, eloc
, elen
, &obh
, 1);
1923 udf_write_aext(inode
, obloc
, &oextoffset
, eloc
, elen
, &obh
, 1);
1924 if (!memcmp(&UDF_I_LOCATION(inode
), &obloc
, sizeof(lb_addr
)))
1926 UDF_I_LENALLOC(inode
) -= (adsize
* 2);
1927 mark_inode_dirty(inode
);
1931 aed
= (struct AllocExtDesc
*)(obh
)->b_data
;
1932 aed
->lengthAllocDescs
=
1933 cpu_to_le32(le32_to_cpu(aed
->lengthAllocDescs
) - (2*adsize
));
1934 udf_update_tag((obh
)->b_data
, oextoffset
- (2*adsize
));
1935 mark_buffer_dirty(obh
);
1940 udf_write_aext(inode
, obloc
, &oextoffset
, eloc
, elen
, &obh
, 1);
1941 if (!memcmp(&UDF_I_LOCATION(inode
), &obloc
, sizeof(lb_addr
)))
1943 UDF_I_LENALLOC(inode
) -= adsize
;
1944 mark_inode_dirty(inode
);
1948 aed
= (struct AllocExtDesc
*)(obh
)->b_data
;
1949 aed
->lengthAllocDescs
=
1950 cpu_to_le32(le32_to_cpu(aed
->lengthAllocDescs
) - adsize
);
1951 udf_update_tag((obh
)->b_data
, oextoffset
- adsize
);
1952 mark_buffer_dirty(obh
);
1956 udf_release_data(nbh
);
1957 udf_release_data(obh
);
1958 return (elen
>> 30);
1961 int inode_bmap(struct inode
*inode
, int block
, lb_addr
*bloc
, Uint32
*extoffset
,
1962 lb_addr
*eloc
, Uint32
*elen
, Uint32
*offset
, struct buffer_head
**bh
)
1964 int etype
, lbcount
= 0;
1968 printk(KERN_ERR
"udf: inode_bmap: block < 0\n");
1973 printk(KERN_ERR
"udf: inode_bmap: NULL inode\n");
1979 *bloc
= UDF_I_LOCATION(inode
);
1983 if ((etype
= udf_next_aext(inode
, bloc
, extoffset
, eloc
, elen
, bh
, 1)) == -1)
1985 *offset
= block
- lbcount
;
1988 lbcount
+= ((*elen
+ inode
->i_sb
->s_blocksize
- 1) >>
1989 inode
->i_sb
->s_blocksize_bits
);
1990 } while (lbcount
<= block
);
1992 *offset
= block
+ ((*elen
+ inode
->i_sb
->s_blocksize
- 1) >>
1993 inode
->i_sb
->s_blocksize_bits
) - lbcount
;
1998 long udf_locked_block_map(struct inode
*inode
, long block
)
2001 Uint32 offset
, extoffset
, elen
;
2002 struct buffer_head
*bh
= NULL
;
2005 if (inode_bmap(inode
, block
, &bloc
, &extoffset
, &eloc
, &elen
, &offset
, &bh
) == EXTENT_RECORDED_ALLOCATED
)
2006 ret
= udf_get_lb_pblock(inode
->i_sb
, eloc
, offset
);
2011 udf_release_data(bh
);
2013 if (UDF_QUERY_FLAG(inode
->i_sb
, UDF_FLAG_VARCONV
))
2014 return udf_fixed_to_variable(ret
);
2019 long udf_block_map(struct inode
*inode
, long block
)
2024 ret
= udf_locked_block_map(inode
, block
);