2 * mdt.c - meta data file for NILFS
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 * Written by Ryusuke Konishi <ryusuke@osrg.net>
23 #include <linux/buffer_head.h>
24 #include <linux/mpage.h>
26 #include <linux/writeback.h>
27 #include <linux/backing-dev.h>
28 #include <linux/swap.h>
29 #include <linux/slab.h>
37 #define NILFS_MDT_MAX_RA_BLOCKS (16 - 1)
39 #define INIT_UNUSED_INODE_FIELDS
/*
 * NOTE(review): this file is a fragmented extraction -- the embedded
 * original line numbers skip, so braces, local declarations and error
 * paths between the fragments below have been dropped.  Code bytes are
 * left untouched; comments only.
 */
/*
 * nilfs_mdt_insert_new_block - format a newly created metadata block.
 * Registers @block in the inode's bmap, zero-fills the buffer, runs the
 * caller-supplied @init_block formatter, then marks the buffer
 * uptodate/dirty and the owning inode dirty.  The error/return path is
 * not visible in this extraction -- presumably returns the bmap-insert
 * status; TODO confirm against full source.
 */
42 nilfs_mdt_insert_new_block(struct inode
*inode
, unsigned long block
,
43 struct buffer_head
*bh
,
44 void (*init_block
)(struct inode
*,
45 struct buffer_head
*, void *))
/* ii: NILFS-private part of the inode; carries i_bmap used below. */
47 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
51 /* Caller exclude read accesses using page lock */
53 /* set_buffer_new(bh); */
/* Record the new block in the bmap (bh pointer stored as the record). */
56 ret
= nilfs_bmap_insert(ii
->i_bmap
, block
, (unsigned long)bh
);
60 set_buffer_mapped(bh
);
/* Zero the block contents through a temporary atomic kmap, then let
 * the caller's initializer lay down the on-disk format. */
62 kaddr
= kmap_atomic(bh
->b_page
, KM_USER0
);
63 memset(kaddr
+ bh_offset(bh
), 0, 1 << inode
->i_blkbits
);
65 init_block(inode
, bh
, kaddr
);
66 flush_dcache_page(bh
->b_page
);
67 kunmap_atomic(kaddr
, KM_USER0
);
/* Publish: buffer is now valid and dirty; inode needs writeback too. */
69 set_buffer_uptodate(bh
);
70 nilfs_mark_buffer_dirty(bh
);
71 nilfs_mdt_mark_dirty(inode
);
/*
 * nilfs_mdt_create_block - allocate and format a metadata block inside a
 * transaction.  Grabs (or creates) the page-cache buffer for @block,
 * binds it to the device, formats it via nilfs_mdt_insert_new_block(),
 * and commits or aborts the transaction depending on err.  Several
 * statements (braces, jump targets, returns) are elided in this
 * extraction; comments below are hedged accordingly.
 */
75 static int nilfs_mdt_create_block(struct inode
*inode
, unsigned long block
,
76 struct buffer_head
**out_bh
,
77 void (*init_block
)(struct inode
*,
/* Locals: the_nilfs object, superblock used for the transaction,
 * on-stack transaction info, and the buffer being created. */
81 struct the_nilfs
*nilfs
= NILFS_MDT(inode
)->mi_nilfs
;
82 struct super_block
*sb
= inode
->i_sb
;
83 struct nilfs_transaction_info ti
;
84 struct buffer_head
*bh
;
89 * Make sure this function is not called from any
/* If no log writer is attached, take the superblock from it is not
 * possible -- the elided branch presumably bails out; TODO confirm. */
92 if (!nilfs
->ns_writer
) {
/* Fall back to the writer's superblock when inode->i_sb is unusable. */
97 sb
= nilfs
->ns_writer
->s_super
;
/* All block creation happens inside a NILFS transaction. */
100 nilfs_transaction_begin(sb
, &ti
, 0);
103 bh
= nilfs_grab_buffer(inode
, inode
->i_mapping
, block
, 0);
/* An already-uptodate buffer means the block exists (EEXIST path in
 * the elided code, presumably). */
108 if (buffer_uptodate(bh
))
112 if (buffer_uptodate(bh
))
/* Bind the fresh buffer to the backing device before formatting. */
115 bh
->b_bdev
= nilfs
->ns_bdev
;
116 err
= nilfs_mdt_insert_new_block(inode
, block
, bh
, init_block
);
/* Release the page lock/reference taken by nilfs_grab_buffer(). */
123 unlock_page(bh
->b_page
);
124 page_cache_release(bh
->b_page
);
/* Commit on success, abort on failure (branch structure elided). */
129 err
= nilfs_transaction_commit(sb
);
131 nilfs_transaction_abort(sb
);
/*
 * nilfs_mdt_submit_block - look up a metadata block and submit read I/O
 * if needed.  Returns -EEXIST (used as an internal "already uptodate"
 * code) when no I/O is required; otherwise resolves the disk address
 * via the bmap and submits the buffer with end_buffer_read_sync as
 * completion.  The submit_bh() call and error branches are elided here.
 */
137 nilfs_mdt_submit_block(struct inode
*inode
, unsigned long blkoff
,
138 int mode
, struct buffer_head
**out_bh
)
140 struct buffer_head
*bh
;
144 bh
= nilfs_grab_buffer(inode
, inode
->i_mapping
, blkoff
, 0);
/* -EEXIST is not an error to callers: "block already in cache". */
148 ret
= -EEXIST
; /* internal code */
149 if (buffer_uptodate(bh
))
/* Readahead must not block: if the buffer lock is contended, the
 * elided branch presumably bails with -EBUSY; TODO confirm. */
153 if (!trylock_buffer(bh
)) {
157 } else /* mode == READ */
160 if (buffer_uptodate(bh
)) {
/* Translate the file block offset to a disk block number. */
165 ret
= nilfs_bmap_lookup(NILFS_I(inode
)->i_bmap
, blkoff
, &blknum
);
/* Map the buffer to the resolved device block before submission. */
170 bh
->b_bdev
= NILFS_MDT(inode
)->mi_nilfs
->ns_bdev
;
171 bh
->b_blocknr
= (sector_t
)blknum
;
172 set_buffer_mapped(bh
);
174 bh
->b_end_io
= end_buffer_read_sync
;
/* Drop the page lock/reference from nilfs_grab_buffer(). */
183 unlock_page(bh
->b_page
);
184 page_cache_release(bh
->b_page
);
/*
 * nilfs_mdt_read_block - synchronously read a metadata block, with
 * optional readahead of up to NILFS_MDT_MAX_RA_BLOCKS following blocks.
 * Issues READ on the target, then best-effort READA on successors,
 * waits for the first buffer and checks it came back uptodate.  Error
 * returns and brelse() of readahead buffers are elided in this
 * extraction.
 */
190 static int nilfs_mdt_read_block(struct inode
*inode
, unsigned long block
,
191 int readahead
, struct buffer_head
**out_bh
)
193 struct buffer_head
*first_bh
, *bh
;
194 unsigned long blkoff
;
195 int i
, nr_ra_blocks
= NILFS_MDT_MAX_RA_BLOCKS
;
198 err
= nilfs_mdt_submit_block(inode
, block
, READ
, &first_bh
);
/* -EEXIST: block was already uptodate in cache -- success path. */
199 if (err
== -EEXIST
) /* internal code */
/* Readahead loop over the following blocks; failures other than
 * transient -EBUSY abort the loop (branch bodies elided). */
207 for (i
= 0; i
< nr_ra_blocks
; i
++, blkoff
++) {
208 err
= nilfs_mdt_submit_block(inode
, blkoff
, READA
, &bh
);
209 if (likely(!err
|| err
== -EEXIST
))
211 else if (err
!= -EBUSY
)
213 /* abort readahead if bmap lookup failed */
214 if (!buffer_locked(first_bh
))
/* Block until the synchronous read of the target completes. */
219 wait_on_buffer(first_bh
);
/* I/O error detection: a non-uptodate buffer after the wait
 * presumably yields -EIO in the elided code; TODO confirm. */
223 if (!buffer_uptodate(first_bh
))
236 * nilfs_mdt_get_block - read or create a buffer on meta data file.
237 * @inode: inode of the meta data file
238 * @blkoff: block offset
239 * @create: create flag
240 * @init_block: initializer used for newly allocated block
241 * @out_bh: output of a pointer to the buffer_head
243 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
244 * a new buffer if @create is not zero. On success, the returned buffer is
245 * assured to be either existing or formatted using a buffer lock on success.
246 * @out_bh is substituted only when zero is returned.
248 * Return Value: On success, it returns 0. On error, the following negative
249 * error code is returned.
251 * %-ENOMEM - Insufficient memory available.
255 * %-ENOENT - the specified block does not exist (hole block)
257 * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
259 * %-EROFS - Read only filesystem (for create mode)
261 int nilfs_mdt_get_block(struct inode
*inode
, unsigned long blkoff
, int create
,
262 void (*init_block
)(struct inode
*,
263 struct buffer_head
*, void *),
264 struct buffer_head
**out_bh
)
268 /* Should be rewritten with merging nilfs_mdt_read_block() */
/* First try a plain read (readahead enabled only when not creating). */
270 ret
= nilfs_mdt_read_block(inode
, blkoff
, !create
, out_bh
);
/* Only fall through to creation when the caller asked for it and the
 * block is a hole (-ENOENT). */
271 if (!create
|| ret
!= -ENOENT
)
274 ret
= nilfs_mdt_create_block(inode
, blkoff
, out_bh
, init_block
);
/* -EEXIST from creation means a racing reader/creator won; the elided
 * body presumably retries the read-create sequence. */
275 if (unlikely(ret
== -EEXIST
)) {
276 /* create = 0; */ /* limit read-create loop retries */
283 * nilfs_mdt_delete_block - make a hole on the meta data file.
284 * @inode: inode of the meta data file
285 * @block: block offset
287 * Return Value: On success, zero is returned.
288 * On error, one of the following negative error code is returned.
290 * %-ENOMEM - Insufficient memory available.
294 * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
296 int nilfs_mdt_delete_block(struct inode
*inode
, unsigned long block
)
298 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
/* Punch the block out of the bmap; -ENOENT (already a hole) is
 * treated the same as success for the cleanup below. */
301 err
= nilfs_bmap_delete(ii
->i_bmap
, block
);
302 if (!err
|| err
== -ENOENT
) {
/* Inode metadata changed; also drop any cached page/buffer state. */
303 nilfs_mdt_mark_dirty(inode
);
304 nilfs_mdt_forget_block(inode
, block
);
310 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
311 * @inode: inode of the meta data file
312 * @block: block offset
314 * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
315 * tries to release the page including the buffer from a page cache.
317 * Return Value: On success, 0 is returned. On error, one of the following
318 * negative error code is returned.
320 * %-EBUSY - page has an active buffer.
322 * %-ENOENT - page cache has no page addressed by the offset.
324 int nilfs_mdt_forget_block(struct inode
*inode
, unsigned long block
)
/* Page index containing @block: blocks-per-page is
 * 2^(PAGE_CACHE_SHIFT - i_blkbits). */
326 pgoff_t index
= (pgoff_t
)block
>>
327 (PAGE_CACHE_SHIFT
- inode
->i_blkbits
);
329 unsigned long first_block
;
/* Lock the page so the buffer state cannot change underneath us. */
333 page
= find_lock_page(inode
->i_mapping
, index
);
337 wait_on_page_writeback(page
);
/* First file-block covered by this page, for nth-buffer indexing. */
339 first_block
= (unsigned long)index
<<
340 (PAGE_CACHE_SHIFT
- inode
->i_blkbits
);
341 if (page_has_buffers(page
)) {
342 struct buffer_head
*bh
;
/* Locate the buffer for @block inside the page and forget it. */
344 bh
= nilfs_page_get_nth_block(page
, block
- first_block
);
345 nilfs_forget_buffer(bh
);
/* If other buffers keep the page dirty we cannot invalidate it. */
347 still_dirty
= PageDirty(page
);
349 page_cache_release(page
);
/* Try to evict the now-clean page from the cache (condition text
 * around this call is elided in the extraction). */
352 invalidate_inode_pages2_range(inode
->i_mapping
, index
, index
) != 0)
358 * nilfs_mdt_mark_block_dirty - mark a block on the meta data file dirty.
359 * @inode: inode of the meta data file
360 * @block: block offset
362 * Return Value: On success, it returns 0. On error, the following negative
363 * error code is returned.
365 * %-ENOMEM - Insufficient memory available.
369 * %-ENOENT - the specified block does not exist (hole block)
371 * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
373 int nilfs_mdt_mark_block_dirty(struct inode
*inode
, unsigned long block
)
375 struct buffer_head
*bh
;
/* Read the block in (no readahead); the error-return branch between
 * this call and the marking below is elided in this extraction. */
378 err
= nilfs_mdt_read_block(inode
, block
, 0, &bh
);
/* Dirty both the buffer and the owning metadata inode. */
381 nilfs_mark_buffer_dirty(bh
);
382 nilfs_mdt_mark_dirty(inode
);
/*
 * nilfs_mdt_fetch_dirty - propagate bmap dirtiness into the inode's
 * NILFS_I_DIRTY state bit and report whether the inode is dirty.
 * Returns nonzero when NILFS_I_DIRTY is set.
 */
387 int nilfs_mdt_fetch_dirty(struct inode
*inode
)
389 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
/* A dirty bmap makes the whole metadata inode dirty. */
391 if (nilfs_bmap_test_and_clear_dirty(ii
->i_bmap
)) {
392 set_bit(NILFS_I_DIRTY
, &ii
->i_state
);
395 return test_bit(NILFS_I_DIRTY
, &ii
->i_state
);
/*
 * nilfs_mdt_write_page - .writepage handler for metadata files.
 * Metadata pages are never written directly by the VM: the page is
 * redirtied and, when appropriate, a segment construction (log write)
 * is requested instead.  Shadow-cache pages are skipped entirely.
 * Several guard branches are elided in this extraction.
 */
399 nilfs_mdt_write_page(struct page
*page
, struct writeback_control
*wbc
)
402 struct super_block
*sb
;
403 struct the_nilfs
*nilfs
;
404 struct nilfs_sb_info
*writer
= NULL
;
/* Hand the page back to the flusher; the segment constructor owns
 * actual metadata writeback. */
407 redirty_page_for_writepage(wbc
, page
);
410 inode
= page
->mapping
->host
;
415 nilfs
= NILFS_MDT(inode
)->mi_nilfs
;
/* A non-NULL assoc_mapping identifies a shadow page cache here. */
417 if (page
->mapping
->assoc_mapping
)
418 return 0; /* Do not request flush for shadow page cache */
/* Pin the log writer while deciding which superblock to flush
 * against (branch structure between these lines is elided). */
420 down_read(&nilfs
->ns_writer_sem
);
421 writer
= nilfs
->ns_writer
;
423 up_read(&nilfs
->ns_writer_sem
);
426 sb
= writer
->s_super
;
/* Synchronous writeback forces a full segment construction;
 * reclaim-driven writeback only requests an async flush. */
429 if (wbc
->sync_mode
== WB_SYNC_ALL
)
430 err
= nilfs_construct_segment(sb
);
431 else if (wbc
->for_reclaim
)
432 nilfs_flush_segment(sb
, inode
->i_ino
);
435 up_read(&nilfs
->ns_writer_sem
);
/*
 * Default address-space / inode / file operations for metadata files.
 * Only writepage (redirect to log writer) and sync_page are needed;
 * the inode and file op tables are intentionally empty.
 */
440 static const struct address_space_operations def_mdt_aops
= {
441 .writepage
= nilfs_mdt_write_page
,
442 .sync_page
= block_sync_page
,
445 static const struct inode_operations def_mdt_iops
;
446 static const struct file_operations def_mdt_fops
;
/*
 * nilfs_mdt_init - attach an nilfs_mdt_info descriptor to a metadata
 * inode.  Allocates at least sizeof(struct nilfs_mdt_info) (or @objsz,
 * whichever is larger, so subclass descriptors fit), wires it into
 * inode->i_private, and configures the inode/mapping.  The allocation-
 * failure return is elided in this extraction.
 */
449 int nilfs_mdt_init(struct inode
*inode
, struct the_nilfs
*nilfs
,
450 gfp_t gfp_mask
, size_t objsz
)
452 struct nilfs_mdt_info
*mi
;
/* GFP_NOFS: this runs in filesystem context; zeroed so all fields
 * start NULL/0. */
454 mi
= kzalloc(max(sizeof(*mi
), objsz
), GFP_NOFS
);
458 mi
->mi_nilfs
= nilfs
;
459 init_rwsem(&mi
->mi_sem
);
460 inode
->i_private
= mi
;
/* Metadata files masquerade as regular files. */
462 inode
->i_mode
= S_IFREG
;
463 mapping_set_gfp_mask(inode
->i_mapping
, gfp_mask
);
464 inode
->i_mapping
->backing_dev_info
= nilfs
->ns_bdi
;
470 * NILFS2 uses pseudo inodes for meta data files such as DAT, cpfile, sufile,
471 * ifile, or gcinodes. This allows the B-tree code and segment constructor
472 * to treat them like regular files, and this helps to simplify the
474 * On the other hand, some of the pseudo inodes have an irregular point:
475 * They don't have valid inode->i_sb pointer because their lifetimes are
476 * longer than those of the super block structs; they may continue for
477 * several consecutive mounts/umounts. This would need discussions.
480 * nilfs_mdt_new_common - allocate a pseudo inode for metadata file
481 * @nilfs: nilfs object
482 * @sb: super block instance the metadata file belongs to
/*
 * Allocates an inode outside the normal inode cache and hand-
 * initializes every field the VFS would otherwise set up, since this
 * pseudo inode is never hashed or put on superblock lists.  Several
 * field initializations visible in the full source are elided here.
 */
486 nilfs_mdt_new_common(struct the_nilfs
*nilfs
, struct super_block
*sb
,
489 struct inode
*inode
= nilfs_alloc_inode_common(nilfs
);
494 struct address_space
* const mapping
= &inode
->i_data
;
496 inode
->i_sb
= sb
; /* sb may be NULL for some meta data files */
497 inode
->i_blkbits
= nilfs
->ns_blocksize_bits
;
/* Single reference held by the caller. */
499 atomic_set(&inode
->i_count
, 1);
/* The following block mirrors VFS inode initialization for fields
 * this pseudo inode never otherwise touches. */
503 #ifdef INIT_UNUSED_INODE_FIELDS
504 atomic_set(&inode
->i_writecount
, 0);
508 inode
->i_generation
= 0;
510 memset(&inode
->i_dquot
, 0, sizeof(inode
->i_dquot
));
512 inode
->i_pipe
= NULL
;
513 inode
->i_bdev
= NULL
;
514 inode
->i_cdev
= NULL
;
516 #ifdef CONFIG_SECURITY
517 inode
->i_security
= NULL
;
519 inode
->dirtied_when
= 0;
521 INIT_LIST_HEAD(&inode
->i_list
);
522 INIT_LIST_HEAD(&inode
->i_sb_list
);
526 spin_lock_init(&inode
->i_lock
);
527 mutex_init(&inode
->i_mutex
);
528 init_rwsem(&inode
->i_alloc_sem
);
530 mapping
->host
= NULL
; /* instead of inode */
532 mapping
->assoc_mapping
= NULL
;
534 inode
->i_mapping
= mapping
;
/*
 * nilfs_mdt_new - allocate and fully initialize a metadata-file inode.
 * Combines nilfs_mdt_new_common() with nilfs_mdt_init() and installs
 * the default op tables.  Failure paths (NULL return on allocation or
 * init failure) are elided in this extraction.
 */
540 struct inode
*nilfs_mdt_new(struct the_nilfs
*nilfs
, struct super_block
*sb
,
541 ino_t ino
, size_t objsz
)
545 inode
= nilfs_mdt_new_common(nilfs
, sb
, ino
);
/* Descriptor allocation failed: tear the pseudo inode back down. */
549 if (nilfs_mdt_init(inode
, nilfs
, NILFS_MDT_GFP
, objsz
) < 0) {
550 nilfs_destroy_inode(inode
);
/* Wire in the (mostly empty) default operation tables. */
553 inode
->i_op
= &def_mdt_iops
;
554 inode
->i_fop
= &def_mdt_fops
;
555 inode
->i_mapping
->a_ops
= &def_mdt_aops
;
/*
 * nilfs_mdt_set_entry_size - record fixed-entry layout parameters for a
 * metadata file: entry size, entries per block, and the offset (in
 * entries) of the first entry after the @header_size-byte header.
 */
559 void nilfs_mdt_set_entry_size(struct inode
*inode
, unsigned entry_size
,
560 unsigned header_size
)
562 struct nilfs_mdt_info
*mi
= NILFS_MDT(inode
);
564 mi
->mi_entry_size
= entry_size
;
/* Block size is 2^i_blkbits bytes. */
565 mi
->mi_entries_per_block
= (1 << inode
->i_blkbits
) / entry_size
;
/* First entry slot index after the header, rounded up. */
566 mi
->mi_first_entry_offset
= DIV_ROUND_UP(header_size
, entry_size
);
/*
 * nilfs_mdt_set_shadow - link a shadow inode's page caches back to the
 * original's.  assoc_mapping is what nilfs_mdt_write_page() checks to
 * recognize shadow caches and skip flushing them.
 */
569 void nilfs_mdt_set_shadow(struct inode
*orig
, struct inode
*shadow
)
571 shadow
->i_mapping
->assoc_mapping
= orig
->i_mapping
;
/* Also link the btnode (B-tree node) cache of the shadow. */
572 NILFS_I(shadow
)->i_btnode_cache
.assoc_mapping
=
573 &NILFS_I(orig
)->i_btnode_cache
;
/* Shadow-map caches are never written back: only sync_page is set. */
576 static const struct address_space_operations shadow_map_aops
= {
577 .sync_page
= block_sync_page
,
581 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
582 * @inode: inode of the metadata file
583 * @shadow: shadow mapping
585 int nilfs_mdt_setup_shadow_map(struct inode
*inode
,
586 struct nilfs_shadow_map
*shadow
)
588 struct nilfs_mdt_info
*mi
= NILFS_MDT(inode
);
589 struct backing_dev_info
*bdi
= NILFS_I_NILFS(inode
)->ns_bdi
;
/* Frozen buffers are tracked on a list (see freeze/release below). */
591 INIT_LIST_HEAD(&shadow
->frozen_buffers
);
/* One shadow cache for data pages, one for btnode pages; both use
 * the no-writeback shadow_map_aops. */
592 nilfs_mapping_init_once(&shadow
->frozen_data
);
593 nilfs_mapping_init(&shadow
->frozen_data
, bdi
, &shadow_map_aops
);
594 nilfs_mapping_init_once(&shadow
->frozen_btnodes
);
595 nilfs_mapping_init(&shadow
->frozen_btnodes
, bdi
, &shadow_map_aops
);
596 mi
->mi_shadow
= shadow
;
601 * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
602 * @inode: inode of the metadata file
/*
 * Snapshots the current dirty state (data pages, btnode pages, bmap)
 * into the shadow map so it can be restored after a failed log write.
 * Error-return branches between the copy calls are elided here.
 */
604 int nilfs_mdt_save_to_shadow_map(struct inode
*inode
)
606 struct nilfs_mdt_info
*mi
= NILFS_MDT(inode
);
607 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
608 struct nilfs_shadow_map
*shadow
= mi
->mi_shadow
;
/* Copy dirty data pages into the frozen_data shadow cache. */
611 ret
= nilfs_copy_dirty_pages(&shadow
->frozen_data
, inode
->i_mapping
);
/* Likewise for the B-tree node cache. */
615 ret
= nilfs_copy_dirty_pages(&shadow
->frozen_btnodes
,
616 &ii
->i_btnode_cache
);
/* Finally snapshot the bmap state itself. */
620 nilfs_bmap_save(ii
->i_bmap
, &shadow
->bmap_store
);
/*
 * nilfs_mdt_freeze_buffer - preserve a copy of @bh in the shadow
 * (frozen_data) cache before it gets overwritten, so the pre-update
 * contents stay available during log writing.  Page-locking and
 * error-return statements are elided in this extraction.
 */
625 int nilfs_mdt_freeze_buffer(struct inode
*inode
, struct buffer_head
*bh
)
627 struct nilfs_shadow_map
*shadow
= NILFS_MDT(inode
)->mi_shadow
;
628 struct buffer_head
*bh_frozen
;
630 int blkbits
= inode
->i_blkbits
;
/* Get (or create) the shadow page at the same index as bh's page. */
633 page
= grab_cache_page(&shadow
->frozen_data
, bh
->b_page
->index
);
637 if (!page_has_buffers(page
))
638 create_empty_buffers(page
, 1 << blkbits
, 0);
/* Pick the shadow buffer at the same in-page offset as @bh. */
640 bh_frozen
= nilfs_page_get_nth_block(page
, bh_offset(bh
) >> blkbits
);
/* Copy the contents only on the first freeze of this buffer. */
642 if (!buffer_uptodate(bh_frozen
))
643 nilfs_copy_buffer(bh_frozen
, bh
);
/* First freeze: keep a reference on the frozen-buffers list and tag
 * the original as redirected.  Otherwise drop the extra reference. */
644 if (list_empty(&bh_frozen
->b_assoc_buffers
)) {
645 list_add_tail(&bh_frozen
->b_assoc_buffers
,
646 &shadow
->frozen_buffers
);
647 set_buffer_nilfs_redirected(bh
);
649 brelse(bh_frozen
); /* already frozen */
654 page_cache_release(page
);
/*
 * nilfs_mdt_get_frozen_buffer - look up the frozen (shadow) copy of
 * @bh, if one exists.  Returns NULL when no shadow page/buffer is
 * present.  Unlock/return statements are elided in this extraction.
 */
659 nilfs_mdt_get_frozen_buffer(struct inode
*inode
, struct buffer_head
*bh
)
661 struct nilfs_shadow_map
*shadow
= NILFS_MDT(inode
)->mi_shadow
;
662 struct buffer_head
*bh_frozen
= NULL
;
/* Find the shadow page matching bh's page index, holding its lock. */
666 page
= find_lock_page(&shadow
->frozen_data
, bh
->b_page
->index
);
668 if (page_has_buffers(page
)) {
/* Same in-page block index as the original buffer. */
669 n
= bh_offset(bh
) >> inode
->i_blkbits
;
670 bh_frozen
= nilfs_page_get_nth_block(page
, n
);
673 page_cache_release(page
);
/*
 * nilfs_release_frozen_buffers - drop the list references pinned by
 * nilfs_mdt_freeze_buffer() so the frozen pages become reclaimable.
 */
678 static void nilfs_release_frozen_buffers(struct nilfs_shadow_map
*shadow
)
680 struct list_head
*head
= &shadow
->frozen_buffers
;
681 struct buffer_head
*bh
;
/* Detach and release each frozen buffer in turn. */
683 while (!list_empty(head
)) {
684 bh
= list_first_entry(head
, struct buffer_head
,
686 list_del_init(&bh
->b_assoc_buffers
);
687 brelse(bh
); /* drop ref-count to make it releasable */
692 * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
693 * @inode: inode of the metadata file
/*
 * Roll the metadata file back to the state saved by
 * nilfs_mdt_save_to_shadow_map(): discard current dirty pages, copy
 * the frozen pages back, and restore the bmap snapshot.  Runs under
 * mi_sem held for writing.
 */
695 void nilfs_mdt_restore_from_shadow_map(struct inode
*inode
)
697 struct nilfs_mdt_info
*mi
= NILFS_MDT(inode
);
698 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
699 struct nilfs_shadow_map
*shadow
= mi
->mi_shadow
;
701 down_write(&mi
->mi_sem
);
/* The palloc lookup cache may now hold stale entries -- drop it. */
703 if (mi
->mi_palloc_cache
)
704 nilfs_palloc_clear_cache(inode
);
/* Replace current data pages with the frozen snapshot. */
706 nilfs_clear_dirty_pages(inode
->i_mapping
);
707 nilfs_copy_back_pages(inode
->i_mapping
, &shadow
->frozen_data
);
/* Likewise for the B-tree node cache. */
709 nilfs_clear_dirty_pages(&ii
->i_btnode_cache
);
710 nilfs_copy_back_pages(&ii
->i_btnode_cache
, &shadow
->frozen_btnodes
);
/* Restore the saved bmap state last. */
712 nilfs_bmap_restore(ii
->i_bmap
, &shadow
->bmap_store
);
714 up_write(&mi
->mi_sem
);
718 * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
719 * @inode: inode of the metadata file
721 void nilfs_mdt_clear_shadow_map(struct inode
*inode
)
723 struct nilfs_mdt_info
*mi
= NILFS_MDT(inode
);
724 struct nilfs_shadow_map
*shadow
= mi
->mi_shadow
;
726 down_write(&mi
->mi_sem
);
/* Unpin frozen buffers first so truncation can free their pages. */
727 nilfs_release_frozen_buffers(shadow
);
728 truncate_inode_pages(&shadow
->frozen_data
, 0);
729 truncate_inode_pages(&shadow
->frozen_btnodes
, 0);
730 up_write(&mi
->mi_sem
);
/*
 * nilfs_mdt_clear - drop all cached state of a metadata inode: page
 * cache, bmap (if initialized), and the B-tree node cache.  Used on
 * teardown from nilfs_mdt_destroy().
 */
733 static void nilfs_mdt_clear(struct inode
*inode
)
735 struct nilfs_inode_info
*ii
= NILFS_I(inode
);
/* Empty the page cache of this pseudo inode. */
737 invalidate_mapping_pages(inode
->i_mapping
, 0, -1);
738 truncate_inode_pages(inode
->i_mapping
, 0);
/* Only clear the bmap if it was ever set up for this inode. */
740 if (test_bit(NILFS_I_BMAP
, &ii
->i_state
))
741 nilfs_bmap_clear(ii
->i_bmap
);
742 nilfs_btnode_cache_clear(&ii
->i_btnode_cache
);
/*
 * nilfs_mdt_destroy - tear down a metadata-file inode created by
 * nilfs_mdt_new(): free the palloc cache if present, clear cached
 * state, and free the pseudo inode.  (kfree of the mdt_info descriptor
 * is elided in this extraction -- TODO confirm against full source.)
 */
745 void nilfs_mdt_destroy(struct inode
*inode
)
747 struct nilfs_mdt_info
*mdi
= NILFS_MDT(inode
);
749 if (mdi
->mi_palloc_cache
)
750 nilfs_palloc_destroy_cache(inode
);
751 nilfs_mdt_clear(inode
);
753 nilfs_destroy_inode(inode
);