/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"
struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
        int for_gc;
};
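/**
 * nilfs_inode_add_blocks - charge newly allocated blocks to an inode
 * @inode: inode of the file
 * @n: number of blocks newly allocated
 *
 * Adds the byte count of @n blocks to i_blocks and, when the inode is
 * attached to a checkpoint root, to the per-root block counter as well.
 * GC inodes have no root, hence the NULL check.  nilfs_inode_sub_blocks()
 * below is the inverse operation.
 */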
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
        if (root)
                atomic_add(n, &root->blocks_count);
}
void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
        if (root)
                atomic_sub(n, &root->blocks_count);
}
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 blknum = 0;
        int err = 0, ret;
        unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * The get_block() function could be called
                                 * from multiple callers for an inode.
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty(inode);
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
                map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
                                                      to proper value */
        } else if (ret == -ENOENT) {
                /* not found is not error (e.g. hole); must return without
                   the mapped state flag. */
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}
/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, nilfs_get_block);
}
/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}
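/**
 * nilfs_writepages - implement writepages() method of nilfs_aops
 * @mapping: address space to write pages from
 * @wbc: writeback control
 *
 * Most writeback is deferred to the segment constructor; only a
 * WB_SYNC_ALL request triggers an immediate data-sync segment write
 * for the given inode here.
 */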
static int nilfs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        int err = 0;

        if (wbc->sync_mode == WB_SYNC_ALL)
                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                                                    wbc->range_start,
                                                    wbc->range_end);
        return err;
}
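/**
 * nilfs_writepage - implement writepage() method of nilfs_aops
 * @page: page to write out
 * @wbc: writeback control
 *
 * The page is redirtied unconditionally since block writes are issued by
 * the segment constructor, not here: a synchronous request forces a full
 * segment construction, while a reclaim-driven one only starts a flush.
 */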
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (wbc->sync_mode == WB_SYNC_ALL) {
                err = nilfs_construct_segment(inode->i_sb);
                if (unlikely(err))
                        return err;
        } else if (wbc->for_reclaim)
                nilfs_flush_segment(inode->i_sb, inode->i_ino);

        return 0;
}
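/**
 * nilfs_set_page_dirty - implement set_page_dirty() method of nilfs_aops
 * @page: page to be marked dirty
 *
 * In addition to dirtying the page's buffers, this accounts all blocks
 * of the page as dirty so that the segment constructor knows how much
 * data it has to write out.
 */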
static int nilfs_set_page_dirty(struct page *page)
{
        int ret = __set_page_dirty_buffers(page);

        if (ret) {
                struct inode *inode = page->mapping->host;
                unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

                nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
}
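/**
 * nilfs_write_begin - implement write_begin() method of nilfs_aops
 * @file: file the write is against (may be NULL)
 * @mapping: address space of the file
 * @pos: byte offset where the write starts
 * @len: number of bytes to be written
 * @flags: AOP_FLAG_* flags
 * @pagep: placeholder for the locked page
 * @fsdata: placeholder for fs private data (unused)
 *
 * Opens a transaction that stays open until nilfs_write_end(), so that
 * block allocation and the subsequent size update commit atomically.
 */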
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)

{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

        if (unlikely(err))
                return err;

        err = block_write_begin(mapping, pos, len, flags, pagep,
                                nilfs_get_block);
        if (unlikely(err)) {
                loff_t isize = mapping->host->i_size;
                if (pos + len > isize)
                        vmtruncate(mapping->host, isize);

                nilfs_transaction_abort(inode->i_sb);
        }
        return err;
}
static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned nr_dirty;
        int err;

        nr_dirty = nilfs_page_count_clean_buffers(page, start,
                                                  start + copied);
        copied = generic_write_end(file, mapping, pos, len, copied, page,
                                   fsdata);
        nilfs_set_file_dirty(inode, nr_dirty);
        err = nilfs_transaction_commit(inode->i_sb);
        return err ? : copied;
}
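/**
 * nilfs_direct_IO - implement direct_IO() method of nilfs_aops
 * @rw: READ or WRITE
 * @iocb: kernel iocb
 * @iov: user iovec
 * @offset: starting byte offset
 * @nr_segs: number of iovec segments
 *
 * Direct writes return 0 so that the caller falls back to the buffered
 * path; direct reads go through blockdev_direct_IO() with
 * nilfs_get_block() as the block mapper.
 */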
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t size;

        if (rw == WRITE)
                return 0;

        /* Needs synchronization with the cleaner */
        size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, nilfs_get_block, NULL);

        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
        if (unlikely((rw & WRITE) && size < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + iov_length(iov, nr_segs);

                if (end > isize)
                        vmtruncate(inode, isize);
        }

        return size;
}
const struct address_space_operations nilfs_aops = {
        .writepage              = nilfs_writepage,
        .readpage               = nilfs_readpage,
        .writepages             = nilfs_writepages,
        .set_page_dirty         = nilfs_set_page_dirty,
        .readpages              = nilfs_readpages,
        .write_begin            = nilfs_write_begin,
        .write_end              = nilfs_write_end,
        /* .releasepage         = nilfs_releasepage, */
        .invalidatepage         = block_invalidatepage,
        .direct_IO              = nilfs_direct_IO,
        .is_partially_uptodate  = block_is_partially_uptodate,
};
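/**
 * nilfs_new_inode - allocate a new inode on a NILFS volume
 * @dir: parent directory inode
 * @mode: file type and access mode of the new inode
 *
 * Allocates an in-core inode, creates the corresponding entry in the
 * ifile of the parent's checkpoint root, and initializes ownership,
 * timestamps, inherited flags and the generation number.
 *
 * Return: pointer to the new inode, or an ERR_PTR() value on failure.
 */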
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
        struct super_block *sb = dir->i_sb;
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct inode *inode;
        struct nilfs_inode_info *ii;
        struct nilfs_root *root;
        int err = -ENOMEM;
        ino_t ino;

        inode = new_inode(sb);
        if (unlikely(!inode))
                goto failed;

        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

        root = NILFS_I(dir)->i_root;
        ii = NILFS_I(inode);
        ii->i_state = 1 << NILFS_I_NEW;
        ii->i_root = root;

        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
        if (unlikely(err))
                goto failed_ifile_create_inode;
        /* reference count of i_bh inherits from nilfs_mdt_read_block() */

        atomic_inc(&root->inodes_count);
        inode_init_owner(inode, dir, mode);
        inode->i_ino = ino;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
                        goto failed_bmap;

                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }

        ii->i_flags = nilfs_mask_flags(
                mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

        /* ii->i_file_acl = 0; */
        /* ii->i_dir_acl = 0; */
        ii->i_dir_start_lookup = 0;
        nilfs_set_inode_flags(inode);
        spin_lock(&nilfs->ns_next_gen_lock);
        inode->i_generation = nilfs->ns_next_generation++;
        spin_unlock(&nilfs->ns_next_gen_lock);
        insert_inode_hash(inode);

        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
                goto failed_acl; /* never occur. When supporting
                                    nilfs_init_acl(), proper cancellation of
                                    above jobs should be considered */

        return inode;

 failed_acl:
 failed_bmap:
        inode->i_nlink = 0;
        iput(inode);  /* raw_inode will be deleted through
                         generic_delete_inode() */
        goto failed;

 failed_ifile_create_inode:
        make_bad_inode(inode);
        iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
                         called */
 failed:
        return ERR_PTR(err);
}
void nilfs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = NILFS_I(inode)->i_flags;

        inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
                            S_DIRSYNC);
        if (flags & FS_SYNC_FL)
                inode->i_flags |= S_SYNC;
        if (flags & FS_APPEND_FL)
                inode->i_flags |= S_APPEND;
        if (flags & FS_IMMUTABLE_FL)
                inode->i_flags |= S_IMMUTABLE;
        if (flags & FS_NOATIME_FL)
                inode->i_flags |= S_NOATIME;
        if (flags & FS_DIRSYNC_FL)
                inode->i_flags |= S_DIRSYNC;
        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}
int nilfs_read_inode_common(struct inode *inode,
                            struct nilfs_inode *raw_inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
        inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
        inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
        inode->i_size = le64_to_cpu(raw_inode->i_size);
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        if (inode->i_nlink == 0 && inode->i_mode == 0)
                return -EINVAL; /* this inode is deleted */

        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
                0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
        ii->i_dir_start_lookup = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);

        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)) {
                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
                if (err < 0)
                        return err;
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }
        return 0;
}
static int __nilfs_read_inode(struct super_block *sb,
                              struct nilfs_root *root, unsigned long ino,
                              struct inode *inode)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct buffer_head *bh;
        struct nilfs_inode *raw_inode;
        int err;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
        if (unlikely(err))
                goto bad_inode;

        raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

        err = nilfs_read_inode_common(inode, raw_inode);
        if (err)
                goto failed_unmap;

        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &nilfs_file_inode_operations;
                inode->i_fop = &nilfs_file_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &nilfs_dir_inode_operations;
                inode->i_fop = &nilfs_dir_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &nilfs_symlink_inode_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else {
                inode->i_op = &nilfs_special_inode_operations;
                init_special_inode(
                        inode, inode->i_mode,
                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
        }
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        nilfs_set_inode_flags(inode);
        return 0;

 failed_unmap:
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);

 bad_inode:
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        return err;
}
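/**
 * nilfs_iget_test - match callback for iget5_locked()/ilookup5()
 * @inode: inode from the hash chain under examination
 * @opaque: pointer to the struct nilfs_iget_args holding the search key
 *
 * An inode matches only if both the inode number and the checkpoint root
 * agree; GC inodes additionally require a matching checkpoint number, so
 * a regular inode and a GC inode with the same ino never alias.
 */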
static int nilfs_iget_test(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;
        struct nilfs_inode_info *ii;

        if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
                return 0;

        ii = NILFS_I(inode);
        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;

        return args->for_gc && args->cno == ii->i_cno;
}
static int nilfs_iget_set(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;

        inode->i_ino = args->ino;
        if (args->for_gc) {
                NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
                NILFS_I(inode)->i_cno = args->cno;
                NILFS_I(inode)->i_root = NULL;
        } else {
                if (args->root && args->ino == NILFS_ROOT_INO)
                        nilfs_get_root(args->root);
                NILFS_I(inode)->i_root = args->root;
        }
        return 0;
}
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return ilookup5(sb, ino, nilfs_iget_test, &args);
}
struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino)
{
        struct inode *inode;
        int err;

        inode = nilfs_iget_locked(sb, root, ino);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = __nilfs_read_inode(sb, root, ino, inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
        };
        struct inode *inode;
        int err;

        inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = nilfs_init_gcinode(inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}
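/**
 * nilfs_write_inode_common - copy in-core inode state to an on-disk inode
 * @inode: in-core inode
 * @raw_inode: mapped on-disk inode entry to be filled
 * @has_bmap: whether the bmap should be serialized as well
 *
 * Note that no atime is written: the on-disk inode only carries ctime
 * and mtime.  For root metadata files the unused tail of the entry is
 * zero-filled, keeping super root blocks fully initialized.
 */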
void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        raw_inode->i_uid = cpu_to_le32(inode->i_uid);
        raw_inode->i_gid = cpu_to_le32(inode->i_gid);
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le64(inode->i_size);
        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);

        if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

                /* zero-fill unused portion in the case of super root block */
                raw_inode->i_xattr = 0;
                raw_inode->i_pad = 0;
                memset((void *)raw_inode + sizeof(*raw_inode), 0,
                       nilfs->ns_inode_size - sizeof(*raw_inode));
        }

        if (has_bmap)
                nilfs_bmap_write(ii->i_bmap, raw_inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_device_code =
                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
        /* When extending inode, nilfs->ns_inode_size should be checked
           for substitutions of appended fields */
}
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
        ino_t ino = inode->i_ino;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *ifile = ii->i_root->ifile;
        struct nilfs_inode *raw_inode;

        raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
                memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
        set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

        nilfs_write_inode_common(inode, raw_inode, 0);
                /* XXX: call with has_bmap = 0 is a workaround to avoid
                   deadlock of bmap. This delays update of i_bmap to just
                   before writing */
        nilfs_ifile_unmap_inode(ifile, ino, ibh);
}
#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */
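/**
 * nilfs_truncate_bmap - truncate the block mapping of an inode
 * @ii: nilfs inode information
 * @from: first file block offset to be freed
 *
 * Truncates backwards from the last mapped key in chunks of at most
 * NILFS_MAX_TRUNCATE_BLOCKS, calling nilfs_relax_pressure_in_lock()
 * between chunks so that a huge truncation proceeds incrementally.
 */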
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
                                unsigned long from)
{
        unsigned long b;
        int ret;

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
repeat:
        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
        if (ret == -ENOENT)
                return;
        else if (ret < 0)
                goto failed;

        if (b < from)
                return;

        b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
        ret = nilfs_bmap_truncate(ii->i_bmap, b);
        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
        if (!ret || (ret == -ENOMEM &&
                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
                goto repeat;

failed:
        nilfs_warning(ii->vfs_inode.i_sb, __func__,
                      "failed to truncate bmap (ino=%lu, err=%d)",
                      ii->vfs_inode.i_ino, ret);
}
void nilfs_truncate(struct inode *inode)
{
        unsigned long blkoff;
        unsigned int blocksize;
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;

        blocksize = sb->s_blocksize;
        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

        nilfs_truncate_bmap(ii, blkoff);

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);

        nilfs_mark_inode_dirty(inode);
        nilfs_set_file_dirty(inode, 0);
        nilfs_transaction_commit(sb);
        /* May construct a logical segment and may fail in sync mode.
           But truncate has no return value. */
}
static void nilfs_clear_inode(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        /*
         * Free resources allocated in nilfs_read_inode(), here.
         */
        BUG_ON(!list_empty(&ii->i_dirty));
        brelse(ii->i_bh);
        ii->i_bh = NULL;

        if (mdi && mdi->mi_palloc_cache)
                nilfs_palloc_destroy_cache(inode);

        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);

        nilfs_btnode_cache_clear(&ii->i_btnode_cache);

        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
}
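/**
 * nilfs_evict_inode - implement evict_inode() method for nilfs
 * @inode: inode being evicted
 *
 * Inodes that are still linked, have no checkpoint root, or are bad get
 * a plain teardown; otherwise the whole bmap is truncated and the entry
 * is deleted from the ifile within a transaction.
 */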
void nilfs_evict_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int ret;

        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
                end_writeback(inode);
                nilfs_clear_inode(inode);
                return;
        }
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        if (inode->i_data.nrpages)
                truncate_inode_pages(&inode->i_data, 0);

        /* TODO: some of the following operations may fail. */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
        end_writeback(inode);

        ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
        if (!ret)
                atomic_dec(&ii->i_root->inodes_count);

        nilfs_clear_inode(inode);

        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);
        nilfs_transaction_commit(sb);
        /* May construct a logical segment and may fail in sync mode.
           But delete_inode has no return value. */
}
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        int err;

        err = inode_change_ok(inode, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                err = vmtruncate(inode, iattr->ia_size);
                if (unlikely(err))
                        goto out_err;
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);

        if (iattr->ia_valid & ATTR_MODE) {
                err = nilfs_acl_chmod(inode);
                if (unlikely(err))
                        goto out_err;
        }

        return nilfs_transaction_commit(sb);

out_err:
        nilfs_transaction_abort(sb);
        return err;
}
int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;
        if ((mask & MAY_WRITE) && root &&
            root->cno != NILFS_CPTREE_CURRENT_CNO)
                return -EROFS; /* snapshot is not writable */

        return generic_permission(inode, mask, flags, NULL);
}
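/**
 * nilfs_load_inode_block - load and cache the inode block of an inode
 * @inode: target inode
 * @pbh: placeholder for the buffer head of the on-disk inode block
 *
 * Reads the ifile block holding the raw inode and caches it in
 * ii->i_bh under ns_inode_lock so concurrent callers share one buffer.
 * The returned buffer carries an extra reference; release it with
 * brelse() when done.
 */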
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        spin_lock(&nilfs->ns_inode_lock);
        if (ii->i_bh == NULL) {
                spin_unlock(&nilfs->ns_inode_lock);
                err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
                                                  inode->i_ino, pbh);
                if (unlikely(err))
                        return err;
                spin_lock(&nilfs->ns_inode_lock);
                if (ii->i_bh == NULL)
                        ii->i_bh = *pbh;
                else {
                        brelse(*pbh);
                        *pbh = ii->i_bh;
                }
        } else
                *pbh = ii->i_bh;

        get_bh(*pbh);
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}
int nilfs_inode_dirty(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        int ret = 0;

        if (!list_empty(&ii->i_dirty)) {
                spin_lock(&nilfs->ns_inode_lock);
                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
                        test_bit(NILFS_I_BUSY, &ii->i_state);
                spin_unlock(&nilfs->ns_inode_lock);
        }
        return ret;
}
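/**
 * nilfs_set_file_dirty - register a file inode for segment construction
 * @inode: inode of the file that got dirty blocks
 * @nr_dirty: number of newly dirtied blocks
 *
 * Accounts @nr_dirty blocks and, on the first dirtying of the inode,
 * grabs a reference and queues it on the ns_dirty_files list so the
 * segment constructor will pick it up.
 */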
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

        atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
                return 0;

        spin_lock(&nilfs->ns_inode_lock);
        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
                /* Because this routine may race with nilfs_dispose_list(),
                   we have to check NILFS_I_QUEUED here, too. */
                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
                        /* This will happen when somebody is freeing
                           this inode. */
                        nilfs_warning(inode->i_sb, __func__,
                                      "cannot get inode (ino=%lu)\n",
                                      inode->i_ino);
                        spin_unlock(&nilfs->ns_inode_lock);
                        return -EINVAL; /* NILFS_I_DIRTY may remain for
                                           freeing inode */
                }
                list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
                set_bit(NILFS_I_QUEUED, &ii->i_state);
        }
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}
int nilfs_mark_inode_dirty(struct inode *inode)
{
        struct buffer_head *ibh;
        int err;

        err = nilfs_load_inode_block(inode, &ibh);
        if (unlikely(err)) {
                nilfs_warning(inode->i_sb, __func__,
                              "failed to reget inode block.\n");
                return err;
        }
        nilfs_update_inode(inode, ibh);
        mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
        brelse(ibh);
        return 0;
}
/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
        struct nilfs_transaction_info ti;
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        if (is_bad_inode(inode)) {
                nilfs_warning(inode->i_sb, __func__,
                              "tried to mark bad_inode dirty. ignored.\n");
                dump_stack();
                return;
        }
        if (mdi) {
                nilfs_mdt_mark_dirty(inode);
                return;
        }
        nilfs_transaction_begin(inode->i_sb, &ti, 0);
        nilfs_mark_inode_dirty(inode);
        nilfs_transaction_commit(inode->i_sb); /* never fails */
}
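/**
 * nilfs_fiemap - implement the fiemap ioctl for nilfs
 * @inode: target inode
 * @fieinfo: fiemap extent request/response
 * @start: byte offset of the first byte to map
 * @len: length of the byte range to map
 *
 * Walks the range with nilfs_bmap_lookup_contig(), merging contiguous
 * mapped runs into single extents and reporting uncommitted regions
 * found in the page cache as FIEMAP_EXTENT_DELALLOC extents.
 */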
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 __u64 start, __u64 len)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 logical = 0, phys = 0, size = 0;
        __u32 flags = 0;
        loff_t isize;
        sector_t blkoff, end_blkoff;
        sector_t delalloc_blkoff;
        unsigned long delalloc_blklen;
        unsigned int blkbits = inode->i_blkbits;
        int ret, n;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);

        blkoff = start >> blkbits;
        end_blkoff = (start + len - 1) >> blkbits;

        delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
                                                        &delalloc_blkoff);

        do {
                __u64 blkphy;
                unsigned int maxblocks;

                if (delalloc_blklen && blkoff == delalloc_blkoff) {
                        if (size) {
                                /* End of the current extent */
                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                        }
                        if (blkoff > end_blkoff)
                                break;

                        flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
                        logical = blkoff << blkbits;
                        phys = 0;
                        size = delalloc_blklen << blkbits;

                        blkoff = delalloc_blkoff + delalloc_blklen;
                        delalloc_blklen = nilfs_find_uncommitted_extent(
                                inode, blkoff, &delalloc_blkoff);
                        continue;
                }

                /*
                 * Limit the number of blocks that we look up so as
                 * not to get into the next delayed allocation extent.
                 */
                maxblocks = INT_MAX;
                if (delalloc_blklen)
                        maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
                                          maxblocks);
                blkphy = 0;

                down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
                n = nilfs_bmap_lookup_contig(
                        NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
                up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

                if (n < 0) {
                        int past_eof;

                        if (unlikely(n != -ENOENT))
                                break; /* error */

                        /* HOLE */
                        blkoff++;
                        past_eof = ((blkoff << blkbits) >= isize);

                        if (size) {
                                /* End of the current extent */

                                if (past_eof)
                                        flags |= FIEMAP_EXTENT_LAST;

                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                                size = 0;
                        }
                        if (blkoff > end_blkoff || past_eof)
                                break;
                } else {
                        if (size) {
                                if (phys && blkphy << blkbits == phys + size) {
                                        /* The current extent goes on */
                                        size += n << blkbits;
                                } else {
                                        /* Terminate the current extent */
                                        ret = fiemap_fill_next_extent(
                                                fieinfo, logical, phys, size,
                                                flags);
                                        if (ret || blkoff > end_blkoff)
                                                break;

                                        /* Start another extent */
                                        flags = FIEMAP_EXTENT_MERGED;
                                        logical = blkoff << blkbits;
                                        phys = blkphy << blkbits;
                                        size = n << blkbits;
                                }
                        } else {
                                /* Start a new extent */
                                flags = FIEMAP_EXTENT_MERGED;
                                logical = blkoff << blkbits;
                                phys = blkphy << blkbits;
                                size = n << blkbits;
                        }
                        blkoff += n;
                }
                cond_resched();
        } while (true);

        /* If ret is 1 then we just hit the end of the extent array */
        if (ret == 1)
                ret = 0;

        mutex_unlock(&inode->i_mutex);
        return ret;
}