/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
static int hfsplus_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, hfsplus_get_block);
}
static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, hfsplus_get_block, wbc);
}
static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        *pagep = NULL;
        return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                hfsplus_get_block,
                                &HFSPLUS_I(mapping->host).phys_size);
}
static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, hfsplus_get_block);
}
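
/*
 * ->releasepage() for the B-tree metadata inodes (extents, catalog and
 * attributes files).  Any cached hfs_bnode that maps onto this page is
 * unhashed and freed, unless it is still referenced, in which case the
 * page's buffers are left alone.
 */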
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct hfs_btree *tree;
        struct hfs_bnode *node;
        u32 nidx;
        int i, res = 1;

        switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
                tree = HFSPLUS_SB(sb).ext_tree;
                break;
        case HFSPLUS_CAT_CNID:
                tree = HFSPLUS_SB(sb).cat_tree;
                break;
        case HFSPLUS_ATTR_CNID:
                tree = HFSPLUS_SB(sb).attr_tree;
                break;
        default:
                BUG();
                return 0;
        }
        if (tree->node_size >= PAGE_CACHE_SIZE) {
                /* a node spans one or more whole pages */
                nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
                spin_lock(&tree->hash_lock);
                node = hfs_bnode_findhash(tree, nidx);
                if (!node)
                        ;
                else if (atomic_read(&node->refcnt))
                        res = 0;
                if (res && node) {
                        hfs_bnode_unhash(node);
                        hfs_bnode_free(node);
                }
                spin_unlock(&tree->hash_lock);
        } else {
                /* several nodes fit into one page, check them all */
                nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
                i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
                spin_lock(&tree->hash_lock);
                do {
                        node = hfs_bnode_findhash(tree, nidx++);
                        if (!node)
                                continue;
                        if (atomic_read(&node->refcnt)) {
                                res = 0;
                                break;
                        }
                        hfs_bnode_unhash(node);
                        hfs_bnode_free(node);
                } while (--i && nidx < tree->node_count);
                spin_unlock(&tree->hash_lock);
        }
        return res ? try_to_free_buffers(page) : 0;
}
static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
                const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;

        return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, hfsplus_get_block, NULL);
}
static int hfsplus_writepages(struct address_space *mapping,
                              struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, hfsplus_get_block);
}
const struct address_space_operations hfsplus_btree_aops = {
        .readpage       = hfsplus_readpage,
        .writepage      = hfsplus_writepage,
        .sync_page      = block_sync_page,
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfsplus_bmap,
        .releasepage    = hfsplus_releasepage,
};
const struct address_space_operations hfsplus_aops = {
        .readpage       = hfsplus_readpage,
        .writepage      = hfsplus_writepage,
        .sync_page      = block_sync_page,
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfsplus_bmap,
        .direct_IO      = hfsplus_direct_IO,
        .writepages     = hfsplus_writepages,
};
struct dentry_operations hfsplus_dentry_operations = {
        .d_hash         = hfsplus_hash_dentry,
        .d_compare      = hfsplus_compare_dentry,
};
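
/*
 * Lookup of the magic "rsrc" name inside a regular file: builds (or
 * reuses) a second inode with the same CNID, flagged HFSPLUS_FLG_RSRC,
 * that gives access to the file's resource fork.
 */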
static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
                                          struct nameidata *nd)
{
        struct hfs_find_data fd;
        struct super_block *sb = dir->i_sb;
        struct inode *inode = NULL;
        int err;

        if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
                goto out;

        inode = HFSPLUS_I(dir).rsrc_inode;
        if (inode)
                goto out;

        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);

        inode->i_ino = dir->i_ino;
        INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
        init_MUTEX(&HFSPLUS_I(inode).extents_lock);
        HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;

        hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
        err = hfsplus_find_cat(sb, dir->i_ino, &fd);
        if (!err)
                err = hfsplus_cat_read_inode(inode, &fd);
        hfs_find_exit(&fd);
        if (err) {
                iput(inode);
                return ERR_PTR(err);
        }
        HFSPLUS_I(inode).rsrc_inode = dir;
        HFSPLUS_I(dir).rsrc_inode = inode;
        igrab(dir);
        hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
        mark_inode_dirty(inode);
out:
        d_add(dentry, inode);
        return NULL;
}
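
/*
 * Translate an on-disk HFS+ permission record into inode fields, falling
 * back to the uid/gid/umask mount options when the record carries no
 * Unix mode; 'dir' selects directory vs. regular-file defaults.
 */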
static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
{
        struct super_block *sb = inode->i_sb;
        u16 mode;

        mode = be16_to_cpu(perms->mode);

        inode->i_uid = be32_to_cpu(perms->owner);
        if (!inode->i_uid && !mode)
                inode->i_uid = HFSPLUS_SB(sb).uid;

        inode->i_gid = be32_to_cpu(perms->group);
        if (!inode->i_gid && !mode)
                inode->i_gid = HFSPLUS_SB(sb).gid;

        if (dir) {
                mode = mode ? (mode & S_IALLUGO) :
                        (S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
                mode |= S_IFDIR;
        } else if (!mode)
                mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
                        ~(HFSPLUS_SB(sb).umask));
        inode->i_mode = mode;

        HFSPLUS_I(inode).rootflags = perms->rootflags;
        HFSPLUS_I(inode).userflags = perms->userflags;
        if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;
        if (perms->rootflags & HFSPLUS_FLG_APPEND)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
}
static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
{
        if (inode->i_flags & S_IMMUTABLE)
                perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
        else
                perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
        if (inode->i_flags & S_APPEND)
                perms->rootflags |= HFSPLUS_FLG_APPEND;
        else
                perms->rootflags &= ~HFSPLUS_FLG_APPEND;
        perms->userflags = HFSPLUS_I(inode).userflags;
        perms->mode = cpu_to_be16(inode->i_mode);
        perms->owner = cpu_to_be32(inode->i_uid);
        perms->group = cpu_to_be32(inode->i_gid);
        perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
}
static int hfsplus_permission(struct inode *inode, int mask, struct nameidata *nd)
{
        /* MAY_EXEC is also used for lookup, if no x bit is set allow lookup,
         * open_exec has the same test, so it's still not executable, if an x
         * bit is set fall back to the standard permission check.
         */
        if (S_ISREG(inode->i_mode) && mask & MAY_EXEC && !(inode->i_mode & 0111))
                return 0;
        return generic_permission(inode, mask, NULL);
}
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
        if (HFSPLUS_IS_RSRC(inode))
                inode = HFSPLUS_I(inode).rsrc_inode;
        if (atomic_read(&file->f_count) != 1)
                return 0;
        atomic_inc(&HFSPLUS_I(inode).opencnt);
        return 0;
}
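
/*
 * On last close of the (main) inode, trim any preallocated blocks and,
 * if the file was unlinked while open (S_DEAD), delete its catalog
 * record from the hidden directory and free its data.
 */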
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
        struct super_block *sb = inode->i_sb;

        if (HFSPLUS_IS_RSRC(inode))
                inode = HFSPLUS_I(inode).rsrc_inode;
        if (atomic_read(&file->f_count) != 0)
                return 0;
        if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
                mutex_lock(&inode->i_mutex);
                hfsplus_file_truncate(inode);
                if (inode->i_flags & S_DEAD) {
                        hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
                        hfsplus_delete_inode(inode);
                }
                mutex_unlock(&inode->i_mutex);
        }
        return 0;
}
extern const struct inode_operations hfsplus_dir_inode_operations;
extern struct file_operations hfsplus_dir_operations;
static const struct inode_operations hfsplus_file_inode_operations = {
        .lookup         = hfsplus_file_lookup,
        .truncate       = hfsplus_file_truncate,
        .permission     = hfsplus_permission,
        .setxattr       = hfsplus_setxattr,
        .getxattr       = hfsplus_getxattr,
        .listxattr      = hfsplus_listxattr,
};
static const struct file_operations hfsplus_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = generic_file_aio_write,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = file_fsync,
        .open           = hfsplus_file_open,
        .release        = hfsplus_file_release,
        .ioctl          = hfsplus_ioctl,
};
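
/*
 * Allocate and initialize an in-core inode for a newly created object,
 * assigning the next free CNID and the operation vectors matching its
 * type; the catalog record itself is written by the caller.
 */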
struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
{
        struct inode *inode = new_inode(sb);
        if (!inode)
                return NULL;

        inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
        inode->i_mode = mode;
        inode->i_uid = current->fsuid;
        inode->i_gid = current->fsgid;
        inode->i_nlink = 1;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
        INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
        init_MUTEX(&HFSPLUS_I(inode).extents_lock);
        atomic_set(&HFSPLUS_I(inode).opencnt, 0);
        HFSPLUS_I(inode).flags = 0;
        memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
        memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
        HFSPLUS_I(inode).alloc_blocks = 0;
        HFSPLUS_I(inode).first_blocks = 0;
        HFSPLUS_I(inode).cached_start = 0;
        HFSPLUS_I(inode).cached_blocks = 0;
        HFSPLUS_I(inode).phys_size = 0;
        HFSPLUS_I(inode).fs_blocks = 0;
        HFSPLUS_I(inode).rsrc_inode = NULL;
        if (S_ISDIR(inode->i_mode)) {
                inode->i_size = 2;
                HFSPLUS_SB(sb).folder_count++;
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
        } else if (S_ISREG(inode->i_mode)) {
                HFSPLUS_SB(sb).file_count++;
                inode->i_op = &hfsplus_file_inode_operations;
                inode->i_fop = &hfsplus_file_operations;
                inode->i_mapping->a_ops = &hfsplus_aops;
                HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
        } else if (S_ISLNK(inode->i_mode)) {
                HFSPLUS_SB(sb).file_count++;
                inode->i_op = &page_symlink_inode_operations;
                inode->i_mapping->a_ops = &hfsplus_aops;
                HFSPLUS_I(inode).clump_blocks = 1;
        } else
                HFSPLUS_SB(sb).file_count++;
        insert_inode_hash(inode);
        mark_inode_dirty(inode);
        sb->s_dirt = 1;

        return inode;
}
void hfsplus_delete_inode(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (S_ISDIR(inode->i_mode)) {
                HFSPLUS_SB(sb).folder_count--;
                sb->s_dirt = 1;
                return;
        }
        HFSPLUS_SB(sb).file_count--;
        if (S_ISREG(inode->i_mode)) {
                if (!inode->i_nlink) {
                        inode->i_size = 0;
                        hfsplus_file_truncate(inode);
                }
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_size = 0;
                hfsplus_file_truncate(inode);
        }
        sb->s_dirt = 1;
}
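
/*
 * Load one fork (data or resource) from its on-disk fork record: the
 * first extents, total size, allocation block count and clump size.
 */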
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
        struct super_block *sb = inode->i_sb;
        u32 count;
        int i;

        memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
               sizeof(hfsplus_extent_rec));
        for (count = 0, i = 0; i < 8; i++)
                count += be32_to_cpu(fork->extents[i].block_count);
        HFSPLUS_I(inode).first_blocks = count;
        memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
        HFSPLUS_I(inode).cached_start = 0;
        HFSPLUS_I(inode).cached_blocks = 0;

        HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
        inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
        HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
        inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
        HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
        if (!HFSPLUS_I(inode).clump_blocks)
                HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks :
                                HFSPLUS_SB(sb).data_clump_blocks;
}
void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
        memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
               sizeof(hfsplus_extent_rec));
        fork->total_size = cpu_to_be64(inode->i_size);
        fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
}
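
/*
 * Fill in an inode from the catalog record that fd currently points at.
 * Folders and files get permissions, timestamps and the matching
 * operation vectors; any other record type is rejected with -EIO.
 */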
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
        hfsplus_cat_entry entry;
        int res = 0;
        u16 type;

        type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

        HFSPLUS_I(inode).dev = 0;
        if (type == HFSPLUS_FOLDER) {
                struct hfsplus_cat_folder *folder = &entry.folder;

                if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
                        /* panic? */;
                hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
                               sizeof(struct hfsplus_cat_folder));
                hfsplus_get_perms(inode, &folder->permissions, 1);
                inode->i_nlink = 1;
                inode->i_size = 2 + be32_to_cpu(folder->valence);
                inode->i_atime = hfsp_mt2ut(folder->access_date);
                inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
                inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
                HFSPLUS_I(inode).create_date = folder->create_date;
                HFSPLUS_I(inode).fs_blocks = 0;
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
        } else if (type == HFSPLUS_FILE) {
                struct hfsplus_cat_file *file = &entry.file;

                if (fd->entrylength < sizeof(struct hfsplus_cat_file))
                        /* panic? */;
                hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
                               sizeof(struct hfsplus_cat_file));

                hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
                                        &file->data_fork : &file->rsrc_fork);
                hfsplus_get_perms(inode, &file->permissions, 0);
                inode->i_nlink = 1;
                if (S_ISREG(inode->i_mode)) {
                        if (file->permissions.dev)
                                inode->i_nlink = be32_to_cpu(file->permissions.dev);
                        inode->i_op = &hfsplus_file_inode_operations;
                        inode->i_fop = &hfsplus_file_operations;
                        inode->i_mapping->a_ops = &hfsplus_aops;
                } else if (S_ISLNK(inode->i_mode)) {
                        inode->i_op = &page_symlink_inode_operations;
                        inode->i_mapping->a_ops = &hfsplus_aops;
                } else {
                        init_special_inode(inode, inode->i_mode,
                                           be32_to_cpu(file->permissions.dev));
                }
                inode->i_atime = hfsp_mt2ut(file->access_date);
                inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
                inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
                HFSPLUS_I(inode).create_date = file->create_date;
        } else {
                printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
                res = -EIO;
        }
        return res;
}
int hfsplus_cat_write_inode(struct inode *inode)
{
        struct inode *main_inode = inode;
        struct hfs_find_data fd;
        hfsplus_cat_entry entry;

        if (HFSPLUS_IS_RSRC(inode))
                main_inode = HFSPLUS_I(inode).rsrc_inode;

        if (!main_inode->i_nlink)
                return 0;

        if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
                /* panic? */
                return -EIO;

        if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
                /* panic? */
                goto out;

        if (S_ISDIR(main_inode->i_mode)) {
                struct hfsplus_cat_folder *folder = &entry.folder;

                if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
                        /* panic? */;
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                               sizeof(struct hfsplus_cat_folder));
                /* simple node checks? */
                hfsplus_set_perms(inode, &folder->permissions);
                folder->access_date = hfsp_ut2mt(inode->i_atime);
                folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
                folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
                folder->valence = cpu_to_be32(inode->i_size - 2);
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                sizeof(struct hfsplus_cat_folder));
        } else if (HFSPLUS_IS_RSRC(inode)) {
                struct hfsplus_cat_file *file = &entry.file;
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                               sizeof(struct hfsplus_cat_file));
                hfsplus_inode_write_fork(inode, &file->rsrc_fork);
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                sizeof(struct hfsplus_cat_file));
        } else {
                struct hfsplus_cat_file *file = &entry.file;

                if (fd.entrylength < sizeof(struct hfsplus_cat_file))
                        /* panic? */;
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                               sizeof(struct hfsplus_cat_file));
                hfsplus_inode_write_fork(inode, &file->data_fork);
                if (S_ISREG(inode->i_mode))
                        HFSPLUS_I(inode).dev = inode->i_nlink;
                if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                        HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
                hfsplus_set_perms(inode, &file->permissions);
                if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
                        file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
                else
                        file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
                file->access_date = hfsp_ut2mt(inode->i_atime);
                file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
                file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                sizeof(struct hfsplus_cat_file));
        }
out:
        hfs_find_exit(&fd);
        return 0;
}