/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

static int hfsplus_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, hfsplus_get_block, wbc);
}

static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        *pagep = NULL;
        return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                hfsplus_get_block,
                                &HFSPLUS_I(mapping->host).phys_size);
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, hfsplus_get_block);
}

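/*
 * Release a page backing one or more B-tree nodes.  The page may only
 * be freed if no cached node covering it is still referenced; any
 * unreferenced nodes are unhashed and freed first.
 */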
static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct hfs_btree *tree;
        struct hfs_bnode *node;
        u32 nidx;
        int i, res = 1;

        switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
                tree = HFSPLUS_SB(sb).ext_tree;
                break;
        case HFSPLUS_CAT_CNID:
                tree = HFSPLUS_SB(sb).cat_tree;
                break;
        case HFSPLUS_ATTR_CNID:
                tree = HFSPLUS_SB(sb).attr_tree;
                break;
        default:
                BUG();
                return 0;
        }
        if (!tree)
                return 0;
        if (tree->node_size >= PAGE_CACHE_SIZE) {
                nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
                spin_lock(&tree->hash_lock);
                node = hfs_bnode_findhash(tree, nidx);
                if (!node)
                        ;
                else if (atomic_read(&node->refcnt))
                        res = 0;
                if (res && node) {
                        hfs_bnode_unhash(node);
                        hfs_bnode_free(node);
                }
                spin_unlock(&tree->hash_lock);
        } else {
                nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
                i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
                spin_lock(&tree->hash_lock);
                do {
                        node = hfs_bnode_findhash(tree, nidx++);
                        if (!node)
                                continue;
                        if (atomic_read(&node->refcnt)) {
                                res = 0;
                                break;
                        }
                        hfs_bnode_unhash(node);
                        hfs_bnode_free(node);
                } while (--i && nidx < tree->node_count);
                spin_unlock(&tree->hash_lock);
        }
        return res ? try_to_free_buffers(page) : 0;
}

static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
                const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;

        return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, hfsplus_get_block, NULL);
}

static int hfsplus_writepages(struct address_space *mapping,
                              struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

const struct address_space_operations hfsplus_btree_aops = {
        .readpage       = hfsplus_readpage,
        .writepage      = hfsplus_writepage,
        .sync_page      = block_sync_page,
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfsplus_bmap,
        .releasepage    = hfsplus_releasepage,
};

const struct address_space_operations hfsplus_aops = {
        .readpage       = hfsplus_readpage,
        .writepage      = hfsplus_writepage,
        .sync_page      = block_sync_page,
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfsplus_bmap,
        .direct_IO      = hfsplus_direct_IO,
        .writepages     = hfsplus_writepages,
};

struct dentry_operations hfsplus_dentry_operations = {
        .d_hash         = hfsplus_hash_dentry,
        .d_compare      = hfsplus_compare_dentry,
};

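/*
 * Lookup of the special "rsrc" name inside a regular file: instantiate
 * a second inode that exposes the file's resource fork, creating and
 * caching it on first access.
 */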
static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
                                          struct nameidata *nd)
{
        struct hfs_find_data fd;
        struct super_block *sb = dir->i_sb;
        struct inode *inode = NULL;
        int err;

        if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
                goto out;

        inode = HFSPLUS_I(dir).rsrc_inode;
        if (inode)
                goto out;

        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);

        inode->i_ino = dir->i_ino;
        INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
        init_MUTEX(&HFSPLUS_I(inode).extents_lock);
        HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;

        hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
        err = hfsplus_find_cat(sb, dir->i_ino, &fd);
        if (!err)
                err = hfsplus_cat_read_inode(inode, &fd);
        hfs_find_exit(&fd);
        if (err) {
                iput(inode);
                return ERR_PTR(err);
        }
        HFSPLUS_I(inode).rsrc_inode = dir;
        HFSPLUS_I(dir).rsrc_inode = inode;
        igrab(dir);
        hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
        mark_inode_dirty(inode);
out:
        d_add(dentry, inode);
        return NULL;
}

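/*
 * Convert the on-disk permission record into inode ownership, mode and
 * flags.  When the record carries no Unix permissions, fall back to the
 * uid/gid/umask given at mount time.
 */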
static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
{
        struct super_block *sb = inode->i_sb;
        u16 mode;

        mode = be16_to_cpu(perms->mode);

        inode->i_uid = be32_to_cpu(perms->owner);
        if (!inode->i_uid && !mode)
                inode->i_uid = HFSPLUS_SB(sb).uid;

        inode->i_gid = be32_to_cpu(perms->group);
        if (!inode->i_gid && !mode)
                inode->i_gid = HFSPLUS_SB(sb).gid;

        if (dir) {
                mode = mode ? (mode & S_IALLUGO) :
                        (S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
                mode |= S_IFDIR;
        } else if (!mode)
                mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
                        ~(HFSPLUS_SB(sb).umask));
        inode->i_mode = mode;

        HFSPLUS_I(inode).rootflags = perms->rootflags;
        HFSPLUS_I(inode).userflags = perms->userflags;
        if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;
        if (perms->rootflags & HFSPLUS_FLG_APPEND)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
}

static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
{
        if (inode->i_flags & S_IMMUTABLE)
                perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
        else
                perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
        if (inode->i_flags & S_APPEND)
                perms->rootflags |= HFSPLUS_FLG_APPEND;
        else
                perms->rootflags &= ~HFSPLUS_FLG_APPEND;
        perms->userflags = HFSPLUS_I(inode).userflags;
        perms->mode = cpu_to_be16(inode->i_mode);
        perms->owner = cpu_to_be32(inode->i_uid);
        perms->group = cpu_to_be32(inode->i_gid);
        perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
}

static int hfsplus_permission(struct inode *inode, int mask, struct nameidata *nd)
{
        /* MAY_EXEC is also used for lookup: if no x bit is set, allow the
         * lookup anyway; open_exec() performs the same test, so the file
         * still cannot be executed.  If an x bit is set, fall back to the
         * standard permission check.
         */
        if (S_ISREG(inode->i_mode) && mask & MAY_EXEC && !(inode->i_mode & 0111))
                return 0;
        return generic_permission(inode, mask, NULL);
}

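/*
 * Track opens per catalog object; opens through a resource-fork inode
 * are counted against the corresponding data-fork inode.
 */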
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
        if (HFSPLUS_IS_RSRC(inode))
                inode = HFSPLUS_I(inode).rsrc_inode;
        if (atomic_read(&file->f_count) != 1)
                return 0;
        atomic_inc(&HFSPLUS_I(inode).opencnt);
        return 0;
}

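/*
 * On last release, trim any space preallocated beyond i_size and, if
 * the inode was marked S_DEAD (deferred deletion), remove its catalog
 * entry from the hidden directory and delete it for real.
 */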
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
        struct super_block *sb = inode->i_sb;

        if (HFSPLUS_IS_RSRC(inode))
                inode = HFSPLUS_I(inode).rsrc_inode;
        if (atomic_read(&file->f_count) != 0)
                return 0;
        if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
                mutex_lock(&inode->i_mutex);
                hfsplus_file_truncate(inode);
                if (inode->i_flags & S_DEAD) {
                        hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
                        hfsplus_delete_inode(inode);
                }
                mutex_unlock(&inode->i_mutex);
        }
        return 0;
}

static const struct inode_operations hfsplus_file_inode_operations = {
        .lookup         = hfsplus_file_lookup,
        .truncate       = hfsplus_file_truncate,
        .permission     = hfsplus_permission,
        .setxattr       = hfsplus_setxattr,
        .getxattr       = hfsplus_getxattr,
        .listxattr      = hfsplus_listxattr,
};

static const struct file_operations hfsplus_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = generic_file_aio_write,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = file_fsync,
        .open           = hfsplus_file_open,
        .release        = hfsplus_file_release,
        .ioctl          = hfsplus_ioctl,
};

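/*
 * Allocate and initialize an in-core inode for a newly created object,
 * assigning the next catalog node ID and the operations matching its
 * file type.
 */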
struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
{
        struct inode *inode = new_inode(sb);
        if (!inode)
                return NULL;

        inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
        inode->i_mode = mode;
        inode->i_uid = current->fsuid;
        inode->i_gid = current->fsgid;
        inode->i_nlink = 1;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
        INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
        init_MUTEX(&HFSPLUS_I(inode).extents_lock);
        atomic_set(&HFSPLUS_I(inode).opencnt, 0);
        HFSPLUS_I(inode).flags = 0;
        memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
        memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
        HFSPLUS_I(inode).alloc_blocks = 0;
        HFSPLUS_I(inode).first_blocks = 0;
        HFSPLUS_I(inode).cached_start = 0;
        HFSPLUS_I(inode).cached_blocks = 0;
        HFSPLUS_I(inode).phys_size = 0;
        HFSPLUS_I(inode).fs_blocks = 0;
        HFSPLUS_I(inode).rsrc_inode = NULL;
        if (S_ISDIR(inode->i_mode)) {
                inode->i_size = 2;
                HFSPLUS_SB(sb).folder_count++;
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
        } else if (S_ISREG(inode->i_mode)) {
                HFSPLUS_SB(sb).file_count++;
                inode->i_op = &hfsplus_file_inode_operations;
                inode->i_fop = &hfsplus_file_operations;
                inode->i_mapping->a_ops = &hfsplus_aops;
                HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
        } else if (S_ISLNK(inode->i_mode)) {
                HFSPLUS_SB(sb).file_count++;
                inode->i_op = &page_symlink_inode_operations;
                inode->i_mapping->a_ops = &hfsplus_aops;
                HFSPLUS_I(inode).clump_blocks = 1;
        } else
                HFSPLUS_SB(sb).file_count++;
        insert_inode_hash(inode);
        mark_inode_dirty(inode);
        sb->s_dirt = 1;

        return inode;
}

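/*
 * Account for the removal of an object in the volume's folder/file
 * counts and free the blocks still held by regular files and symlinks.
 */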
void hfsplus_delete_inode(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (S_ISDIR(inode->i_mode)) {
                HFSPLUS_SB(sb).folder_count--;
                sb->s_dirt = 1;
                return;
        }
        HFSPLUS_SB(sb).file_count--;
        if (S_ISREG(inode->i_mode)) {
                if (!inode->i_nlink) {
                        inode->i_size = 0;
                        hfsplus_file_truncate(inode);
                }
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_size = 0;
                hfsplus_file_truncate(inode);
        }
        sb->s_dirt = 1;
}

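/*
 * Load one fork (data or resource) from its on-disk fork descriptor:
 * initial extents, block and byte sizes, and the clump size used for
 * preallocation.
 */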
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
        struct super_block *sb = inode->i_sb;
        u32 count;
        int i;

        memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
               sizeof(hfsplus_extent_rec));
        for (count = 0, i = 0; i < 8; i++)
                count += be32_to_cpu(fork->extents[i].block_count);
        HFSPLUS_I(inode).first_blocks = count;
        memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
        HFSPLUS_I(inode).cached_start = 0;
        HFSPLUS_I(inode).cached_blocks = 0;

        HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
        inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
        HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
        inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
        HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
        if (!HFSPLUS_I(inode).clump_blocks)
                HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ? HFSPLUS_SB(sb).rsrc_clump_blocks :
                                HFSPLUS_SB(sb).data_clump_blocks;
}

void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
        memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
               sizeof(hfsplus_extent_rec));
        fork->total_size = cpu_to_be64(inode->i_size);
        fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
}

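/*
 * Initialize an in-core inode from the catalog record found by the
 * search in @fd, handling folders, regular files, symlinks and special
 * files.
 */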
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
        hfsplus_cat_entry entry;
        int res = 0;
        u16 type;

        type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

        HFSPLUS_I(inode).dev = 0;
        if (type == HFSPLUS_FOLDER) {
                struct hfsplus_cat_folder *folder = &entry.folder;

                if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
                        /* panic? */;
                hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
                                sizeof(struct hfsplus_cat_folder));
                hfsplus_get_perms(inode, &folder->permissions, 1);
                inode->i_nlink = 1;
                inode->i_size = 2 + be32_to_cpu(folder->valence);
                inode->i_atime = hfsp_mt2ut(folder->access_date);
                inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
                inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
                HFSPLUS_I(inode).create_date = folder->create_date;
                HFSPLUS_I(inode).fs_blocks = 0;
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
        } else if (type == HFSPLUS_FILE) {
                struct hfsplus_cat_file *file = &entry.file;

                if (fd->entrylength < sizeof(struct hfsplus_cat_file))
                        /* panic? */;
                hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
                                sizeof(struct hfsplus_cat_file));

                hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
                                        &file->data_fork : &file->rsrc_fork);
                hfsplus_get_perms(inode, &file->permissions, 0);
                inode->i_nlink = 1;
                if (S_ISREG(inode->i_mode)) {
                        if (file->permissions.dev)
                                inode->i_nlink = be32_to_cpu(file->permissions.dev);
                        inode->i_op = &hfsplus_file_inode_operations;
                        inode->i_fop = &hfsplus_file_operations;
                        inode->i_mapping->a_ops = &hfsplus_aops;
                } else if (S_ISLNK(inode->i_mode)) {
                        inode->i_op = &page_symlink_inode_operations;
                        inode->i_mapping->a_ops = &hfsplus_aops;
                } else {
                        init_special_inode(inode, inode->i_mode,
                                           be32_to_cpu(file->permissions.dev));
                }
                inode->i_atime = hfsp_mt2ut(file->access_date);
                inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
                inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
                HFSPLUS_I(inode).create_date = file->create_date;
        } else {
                printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
                res = -EIO;
        }
        return res;
}

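/*
 * Write inode metadata back to its catalog record.  A resource-fork
 * inode only updates the resource fork; folders update permissions,
 * dates and valence; files update the data fork, permissions, lock
 * flag and dates.
 */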
int hfsplus_cat_write_inode(struct inode *inode)
{
        struct inode *main_inode = inode;
        struct hfs_find_data fd;
        hfsplus_cat_entry entry;

        if (HFSPLUS_IS_RSRC(inode))
                main_inode = HFSPLUS_I(inode).rsrc_inode;

        if (!main_inode->i_nlink)
                return 0;

        if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
                /* panic? */
                return -EIO;

        if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
                /* panic? */
                goto out;

        if (S_ISDIR(main_inode->i_mode)) {
                struct hfsplus_cat_folder *folder = &entry.folder;

                if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
                        /* panic? */;
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                                sizeof(struct hfsplus_cat_folder));
                /* simple node checks? */
                hfsplus_set_perms(inode, &folder->permissions);
                folder->access_date = hfsp_ut2mt(inode->i_atime);
                folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
                folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
                folder->valence = cpu_to_be32(inode->i_size - 2);
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                sizeof(struct hfsplus_cat_folder));
        } else if (HFSPLUS_IS_RSRC(inode)) {
                struct hfsplus_cat_file *file = &entry.file;
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                               sizeof(struct hfsplus_cat_file));
                hfsplus_inode_write_fork(inode, &file->rsrc_fork);
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                sizeof(struct hfsplus_cat_file));
        } else {
                struct hfsplus_cat_file *file = &entry.file;

                if (fd.entrylength < sizeof(struct hfsplus_cat_file))
                        /* panic? */;
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                                sizeof(struct hfsplus_cat_file));
                hfsplus_inode_write_fork(inode, &file->data_fork);
                if (S_ISREG(inode->i_mode))
                        HFSPLUS_I(inode).dev = inode->i_nlink;
                if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                        HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
                hfsplus_set_perms(inode, &file->permissions);
                if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
                        file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
                else
                        file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
                file->access_date = hfsp_ut2mt(inode->i_atime);
                file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
                file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                sizeof(struct hfsplus_cat_file));
        }
out:
        hfs_find_exit(&fd);
        return 0;
}