/*
 *  linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
/* Compare two extents keys, returns 0 on same, pos/neg for difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}
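
/*
 * Map an offset within this extent record to an absolute disk
 * allocation block; returns 0 if the offset lies past the record.
 */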
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}
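
/* Total number of allocation blocks covered by one extent record */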
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}
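
/*
 * Write the cached extent record back into the extents B-tree,
 * inserting a new record if the cached one is not yet on disk.
 * Caller must hold extents_lock.
 */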
static void __hfsplus_ext_write_extent(struct inode *inode,
				       struct hfs_find_data *fd)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
			      HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

	res = hfs_brec_find(fd);
	if (hip->extent_state & HFSPLUS_EXT_NEW) {
		if (res != -ENOENT)
			return;
		hfs_brec_insert(fd, hip->cached_extents,
				sizeof(hfsplus_extent_rec));
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	} else {
		if (res)
			return;
		hfs_bnode_write(fd->bnode, hip->cached_extents,
				fd->entryoffset, fd->entrylength);
		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode.  Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
	 */
	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
}
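
/* Flush the cached extent record if dirty; caller holds extents_lock */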
static void hfsplus_ext_write_extent_locked(struct inode *inode)
{
	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		__hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
}
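
/* Take extents_lock and flush the cached extent record */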
void hfsplus_ext_write_extent(struct inode *inode)
{
	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
}
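
/*
 * Look up the extent record covering 'block' in the extents B-tree
 * and read it into 'extent'; returns -ENOENT if no record matches.
 */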
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
		       sizeof(hfsplus_extent_rec));
	return 0;
}
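
/*
 * Cache the extent record covering 'block', writing back the old
 * cached record first if it is dirty.  Caller must hold extents_lock.
 */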
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
					     struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	if (hip->extent_state & HFSPLUS_EXT_DIRTY)
		__hfsplus_ext_write_extent(inode, fd);

	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ?
						HFSPLUS_TYPE_RSRC :
						HFSPLUS_TYPE_DATA);
	if (!res) {
		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
		hip->cached_blocks =
			hfsplus_ext_block_count(hip->cached_extents);
	} else {
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	return res;
}
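
/* Make sure the extent record covering 'block' is cached in the inode */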
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	int res;

	if (block >= hip->cached_start &&
	    block < hip->cached_start + hip->cached_blocks)
		return 0;

	hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
	res = __hfsplus_ext_cache_extent(&fd, inode, block);
	hfs_find_exit(&fd);
	return res;
}
/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	int was_dirty = 0;
	int shift;

	/* Convert inode block to disk allocation block */
	shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		if (iblock > hip->fs_blocks || !create)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree.  In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n",
	       inode->i_ino, (long long)iblock, dblock);
	mask = (1 << sbi->fs_shift) - 1;
	map_bh(bh_result, sb,
	       (dblock << sbi->fs_shift) + sbi->blockoffset +
	       (iblock & mask));
	if (create) {
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}
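
/* Dump all eight start:count pairs of an extent record for debugging */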
static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	dprint(DBG_EXTENT, "   ");
	for (i = 0; i < 8; i++)
		dprint(DBG_EXTENT, " %u:%u",
		       be32_to_cpu(extent[i].start_block),
		       be32_to_cpu(extent[i].block_count));
	dprint(DBG_EXTENT, "\n");
}
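
/*
 * Append a freshly allocated run of blocks to an extent record,
 * merging it into the last extent when contiguous; returns -ENOSPC
 * when all eight slots of the record are already in use.
 */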
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}
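
/*
 * Free the last 'block_nr' allocation blocks described by an extent
 * record, walking the extents backwards from 'offset'.
 */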
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			hfsplus_block_free(sb, start, count);
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			hfsplus_block_free(sb, start + count, block_nr);
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
		      struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfsplus_free_extents(sb, ext_entry,
				     total_blocks - start,
				     total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}
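
/*
 * Allocate up to clump_blocks more allocation blocks for the file,
 * preferring blocks contiguous with its current last extent.
 */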
int hfsplus_file_extend(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 start, len, goal;
	int res;

	if (sbi->alloc_file->i_size * 8 <
	    sbi->total_blocks - sbi->free_blocks + 8) {
		/* extend alloc file */
		printk(KERN_ERR "hfs: extend alloc file! (%llu,%u,%u)\n",
		       sbi->alloc_file->i_size * 8,
		       sbi->total_blocks, sbi->free_blocks);
		return -ENOSPC;
	}

	mutex_lock(&hip->extents_lock);
	if (hip->alloc_blocks == hip->first_blocks)
		goal = hfsplus_ext_lastblock(hip->first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(hip->cached_extents);
	}

	len = hip->clump_blocks;
	start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
	if (start >= sbi->total_blocks) {
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);

	if (hip->alloc_blocks <= hip->first_blocks) {
		if (!hip->first_blocks) {
			dprint(DBG_EXTENT, "first extents\n");
			/* no extents yet */
			hip->first_extents[0].start_block = cpu_to_be32(start);
			hip->first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(hip->first_extents,
						 hip->alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(hip->cached_extents,
					 hip->alloc_blocks - hip->cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(hip->cached_extents);
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			hip->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&hip->extents_lock);
	if (!res) {
		hip->alloc_blocks += len;
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
	}
	return res;

insert_extent:
	dprint(DBG_EXTENT, "insert new extent\n");
	hfsplus_ext_write_extent_locked(inode);

	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_extents[0].start_block = cpu_to_be32(start);
	hip->cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(hip->cached_extents);
	hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
	hip->cached_start = hip->alloc_blocks;
	hip->cached_blocks = len;

	res = 0;
	goto out;
}
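
/*
 * Bring the fork in line with i_size: growing just instantiates the
 * page at the new EOF via the page cache, shrinking frees whole
 * allocation blocks and drops emptied extent records from the B-tree.
 */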
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	dprint(DBG_INODE, "truncate: %lu, %llu -> %llu\n",
	       inode->i_ino, (long long)hip->phys_size,
	       inode->i_size);

	if (inode->i_size > hip->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		u32 size = inode->i_size;

		res = pagecache_write_begin(NULL, mapping, size, 0,
					    AOP_FLAG_UNINTERRUPTIBLE,
					    &page, &fsdata);
		if (res)
			return;
		res = pagecache_write_end(NULL, mapping, size,
					  0, 0, page, fsdata);
		if (res < 0)
			return;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == hip->phys_size)
		return;

	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
			HFSPLUS_SB(sb)->alloc_blksz_shift;
	alloc_cnt = hip->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	mutex_lock(&hip->extents_lock);
	hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	while (1) {
		if (alloc_cnt == hip->first_blocks) {
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks = blk_cnt;
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = hip->cached_start;
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(hip->cached_extents);
		if (blk_cnt > start) {
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&hip->extents_lock);

	hip->alloc_blocks = blk_cnt;
out:
	hip->phys_size = inode->i_size;
	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
			sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}