/*
 *  linux/fs/hfsplus/inode.c
 *
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
static int hfsplus_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfsplus_get_block);
}
static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfsplus_get_block, wbc);
}
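
/*
 * Begin a buffered write; if block instantiation fails, trim off any
 * blocks that were allocated beyond the current i_size.
 */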
static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				&HFSPLUS_I(mapping->host)->phys_size);
	if (unlikely(ret)) {
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);
	}

	return ret;
}
static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}
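
/*
 * Before releasing a page of one of the special metadata inodes
 * (extents, catalog or attributes tree), drop any cached btree nodes
 * that are backed by this page; refuse if a node is still in use.
 */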
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb)->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb)->cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb)->attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (!tree)
		return 0;
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}
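
/* Direct I/O straight to the block device, mapping blocks with hfsplus_get_block. */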
static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
	ssize_t ret;

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs, hfsplus_get_block, NULL);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			vmtruncate(inode, isize);
	}

	return ret;
}
static int hfsplus_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfsplus_get_block);
}
const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};
const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};
const struct dentry_operations hfsplus_dentry_operations = {
	.d_hash		= hfsplus_hash_dentry,
	.d_compare	= hfsplus_compare_dentry,
};
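
/*
 * Looking up the magic name "rsrc" inside a file gives access to its
 * resource fork through a shadow inode that shares the file's CNID.
 */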
static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
					  struct nameidata *nd)
{
	struct hfs_find_data fd;
	struct super_block *sb = dir->i_sb;
	struct inode *inode = NULL;
	struct hfsplus_inode_info *hip;
	int err;

	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFSPLUS_I(dir)->rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	hip = HFSPLUS_I(inode);
	inode->i_ino = dir->i_ino;
	INIT_LIST_HEAD(&hip->open_dir_list);
	mutex_init(&hip->extents_lock);
	hip->flags = HFSPLUS_FLG_RSRC;

	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
	err = hfsplus_find_cat(sb, dir->i_ino, &fd);
	if (!err)
		err = hfsplus_cat_read_inode(inode, &fd);
	hfs_find_exit(&fd);
	if (err) {
		iput(inode);
		return ERR_PTR(err);
	}
	hip->rsrc_inode = dir;
	HFSPLUS_I(dir)->rsrc_inode = inode;
	igrab(dir);

	/*
	 * __mark_inode_dirty expects inodes to be hashed.  Since we don't
	 * want resource fork inodes in the regular inode space, we make them
	 * appear hashed, but do not put on any lists.  hlist_del()
	 * will work fine and require no locking.
	 */
	hlist_add_fake(&inode->i_hash);

	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}
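
/* Translate on-disk HFS+ permission fields into uid/gid/mode and inode flags. */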
static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	inode->i_uid = be32_to_cpu(perms->owner);
	if (!inode->i_uid && !mode)
		inode->i_uid = sbi->uid;

	inode->i_gid = be32_to_cpu(perms->group);
	if (!inode->i_gid && !mode)
		inode->i_gid = sbi->gid;

	if (dir) {
		mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
		mode |= S_IFDIR;
	} else if (!mode)
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode)->userflags = perms->userflags;
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EOVERFLOW;
	atomic_inc(&HFSPLUS_I(inode)->opencnt);
	return 0;
}
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino,
					   HFSPLUS_SB(sb)->hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}
static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
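
/* Flush the inode, then the dirty superblock, then the underlying block device. */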
static int hfsplus_file_fsync(struct file *filp, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	struct super_block *sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	if (sb->s_dirt) {
		if (!(sb->s_flags & MS_RDONLY))
			hfsplus_sync_fs(sb, 1);
		else
			sb->s_dirt = 0;
	}

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}
static const struct inode_operations hfsplus_file_inode_operations = {
	.lookup		= hfsplus_file_lookup,
	.truncate	= hfsplus_file_truncate,
	.setattr	= hfsplus_setattr,
	.setxattr	= hfsplus_setxattr,
	.getxattr	= hfsplus_getxattr,
	.listxattr	= hfsplus_listxattr,
};
static const struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= hfsplus_file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.unlocked_ioctl = hfsplus_ioctl,
};
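
/* Allocate and initialise an in-core inode for a new HFS+ catalog object. */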
struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct inode *inode = new_inode(sb);
	struct hfsplus_inode_info *hip;

	if (!inode)
		return NULL;

	inode->i_ino = sbi->next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_nlink = 1;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

	hip = HFSPLUS_I(inode);
	INIT_LIST_HEAD(&hip->open_dir_list);
	mutex_init(&hip->extents_lock);
	atomic_set(&hip->opencnt, 0);
	hip->flags = 0;
	memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->alloc_blocks = 0;
	hip->first_blocks = 0;
	hip->cached_start = 0;
	hip->cached_blocks = 0;
	hip->phys_size = 0;
	hip->fs_blocks = 0;
	hip->rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;
		sbi->folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = sbi->data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = 1;
	} else
		sbi->file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	sb->s_dirt = 1;

	return inode;
}
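
/* Account for and release the space held by a deleted folder, file or symlink. */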
void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb)->folder_count--;
		sb->s_dirt = 1;
		return;
	}
	HFSPLUS_SB(sb)->file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	sb->s_dirt = 1;
}
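
/*
 * Load a fork's extent records, sizes and clump size from the on-disk
 * fork descriptor into the in-core inode.
 */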
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int i;

	memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	hip->first_blocks = count;
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_start = 0;
	hip->cached_blocks = 0;

	hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
	hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
	hip->fs_blocks =
		(inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hip->clump_blocks =
		be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
	if (!hip->clump_blocks) {
		hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
			sbi->rsrc_clump_blocks :
			sbi->data_clump_blocks;
	}
}
void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
	       sizeof(hfsplus_extent_rec));
	fork->total_size = cpu_to_be64(inode->i_size);
	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
}
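
/* Initialise an inode from its catalog record (folder or file entry). */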
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode)->linkid = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
			       sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		inode->i_nlink = 1;
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
		HFSPLUS_I(inode)->create_date = folder->create_date;
		HFSPLUS_I(inode)->fs_blocks = 0;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
			       sizeof(struct hfsplus_cat_file));

		hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
					&file->data_fork : &file->rsrc_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		inode->i_nlink = 1;
		if (S_ISREG(inode->i_mode)) {
			if (file->permissions.dev)
				inode->i_nlink =
					be32_to_cpu(file->permissions.dev);
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
		HFSPLUS_I(inode)->create_date = file->create_date;
	} else {
		printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}
int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode)->rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_cat_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;

		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		hfsplus_cat_set_perms(inode, &file->permissions);
		if ((file->permissions.rootflags | file->permissions.userflags) &
		    HFSPLUS_FLG_IMMUTABLE)
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	}
out:
	hfs_find_exit(&fd);
	return 0;
}