/*
 * mdt.c - meta data file for NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include "nilfs.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"

#define NILFS_MDT_MAX_RA_BLOCKS		(16 - 1)

#define INIT_UNUSED_INODE_FIELDS

static int
nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
			   struct buffer_head *bh,
			   void (*init_block)(struct inode *,
					      struct buffer_head *, void *))
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	void *kaddr;
	int ret;

	/* Caller excludes read accesses using page lock */

	/* set_buffer_new(bh); */
	bh->b_blocknr = 0;

	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
	if (unlikely(ret))
		return ret;

	set_buffer_mapped(bh);

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
	if (init_block)
		init_block(inode, bh, kaddr);
	flush_dcache_page(bh->b_page);
	kunmap_atomic(kaddr, KM_USER0);

	set_buffer_uptodate(bh);
	nilfs_mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	return 0;
}

static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
				  struct buffer_head **out_bh,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *))
{
	struct the_nilfs *nilfs = NILFS_MDT(inode)->mi_nilfs;
	struct super_block *sb = inode->i_sb;
	struct nilfs_transaction_info ti;
	struct buffer_head *bh;
	int err;

	if (!sb) {
		/*
		 * Make sure this function is not called from any
		 * read-only context.
		 */
		if (!nilfs->ns_writer)
			return -EROFS;
		sb = nilfs->ns_writer->s_super;
	}

	nilfs_transaction_begin(sb, &ti, 0);

	err = -ENOMEM;
	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
	if (unlikely(!bh))
		goto failed_unlock;

	err = -EEXIST;
	if (buffer_uptodate(bh) || buffer_mapped(bh))
		goto failed_bh;
#if 0
	/* The uptodate flag is not protected by the page lock, but
	   the mapped flag is.  Thus, we don't have to wait for the buffer. */
	if (buffer_uptodate(bh))
		goto failed_bh;
#endif

	bh->b_bdev = nilfs->ns_bdev;
	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
	if (likely(!err)) {
		get_bh(bh);
		*out_bh = bh;
	}

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);

 failed_unlock:
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);
	return err;
}

static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
		       int mode, struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	unsigned long blknum = 0;
	int ret = -ENOMEM;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		goto failed;

	ret = -EEXIST; /* internal code */
	if (buffer_uptodate(bh))
		goto out;

	if (mode == READA) {
		if (!trylock_buffer(bh)) {
			ret = -EBUSY;
			goto failed_bh;
		}
	} else /* mode == READ */
		lock_buffer(bh);

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}
	if (!buffer_mapped(bh)) { /* unused buffer */
		ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff,
					&blknum);
		if (unlikely(ret)) {
			unlock_buffer(bh);
			goto failed_bh;
		}
		bh->b_bdev = NILFS_MDT(inode)->mi_nilfs->ns_bdev;
		bh->b_blocknr = blknum;
		set_buffer_mapped(bh);
	}

	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(mode, bh);
	ret = 0;
 out:
	get_bh(bh);
	*out_bh = bh;

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);
 failed:
	return ret;
}

static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
				struct buffer_head **out_bh)
{
	struct buffer_head *first_bh, *bh;
	unsigned long blkoff;
	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
	int err;

	err = nilfs_mdt_submit_block(inode, block, READ, &first_bh);
	if (err == -EEXIST) /* internal code */
		goto out;
	if (unlikely(err))
		goto failed;

	blkoff = block + 1;
	for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
		err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
		if (likely(!err || err == -EEXIST))
			brelse(bh);
		else if (err != -EBUSY)
			break; /* abort readahead if bmap lookup failed */
		if (!buffer_locked(first_bh))
			goto out_no_wait;
	}

	wait_on_buffer(first_bh);

 out_no_wait:
	err = -EIO;
	if (!buffer_uptodate(first_bh))
		goto failed_bh;
 out:
	*out_bh = first_bh;
	return 0;

 failed_bh:
	brelse(first_bh);
 failed:
	return err;
}

/**
 * nilfs_mdt_get_block - read or create a buffer on meta data file.
 * @inode: inode of the meta data file
 * @blkoff: block offset
 * @create: create flag
 * @init_block: initializer used for newly allocated block
 * @out_bh: output of a pointer to the buffer_head
 *
 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
 * a new buffer if @create is not zero.  On success, the returned buffer is
 * assured to be either existing or formatted using a buffer lock.
 * @out_bh is substituted only when zero is returned.
 *
 * Return Value: On success, it returns 0.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 *
 * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
 *
 * %-EROFS - Read only filesystem (for create mode)
 */
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
			void (*init_block)(struct inode *,
					   struct buffer_head *, void *),
			struct buffer_head **out_bh)
{
	int ret;

	/* Should be rewritten with merging nilfs_mdt_read_block() */
 retry:
	ret = nilfs_mdt_read_block(inode, blkoff, out_bh);
	if (!create || ret != -ENOENT)
		return ret;

	ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
	if (unlikely(ret == -EEXIST)) {
		/* create = 0; */	/* limit read-create loop retries */
		goto retry;
	}
	return ret;
}

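/*
 * Illustrative usage (not part of the original file): a metadata file such
 * as sufile or cpfile typically converts an entry number into a block
 * offset, reads (or creates) the block with nilfs_mdt_get_block(), accesses
 * the entry through the page, and releases the buffer with brelse().  The
 * helper name and its locals below are hypothetical.
 */
#if 0	/* usage sketch */
static int nilfs_mdt_example_touch_entry(struct inode *inode, __u64 nr)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	unsigned long blkoff = nr / mi->mi_entries_per_block;
	struct buffer_head *bh;
	void *kaddr;
	int ret;

	/* create == 1: format the block with zeroes if it is still a hole */
	ret = nilfs_mdt_get_block(inode, blkoff, 1, NULL, &bh);
	if (ret < 0)
		return ret;	/* -ENOMEM, -EIO, -EINVAL or -EROFS */

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	/* ... locate and update the entry relative to bh_offset(bh) ... */
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	brelse(bh);
	return 0;
}
#endif
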
/**
 * nilfs_mdt_delete_block - make a hole on the meta data file.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, zero is returned.
 * On error, one of the following negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
 */
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	err = nilfs_bmap_delete(ii->i_bmap, block);
	if (!err || err == -ENOENT) {
		nilfs_mdt_mark_dirty(inode);
		nilfs_mdt_forget_block(inode, block);
	}
	return err;
}

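/*
 * Illustrative usage (not part of the original file): once every entry kept
 * in a block has been freed, the owning metadata file can punch the block
 * out so it no longer consumes disk space.  The condition variable and the
 * surrounding bookkeeping are assumed to come from the caller.
 */
#if 0	/* usage sketch */
	if (all_entries_in_block_are_unused)
		nilfs_mdt_delete_block(inode, blkoff); /* block becomes a hole */
#endif
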
/**
 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
 * tries to release the page including the buffer from the page cache.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EBUSY - page has an active buffer.
 *
 * %-ENOENT - page cache has no page addressed by the offset.
 */
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
	pgoff_t index = (pgoff_t)block >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);
	struct page *page;
	unsigned long first_block;
	int ret = 0;
	int still_dirty;

	page = find_lock_page(inode->i_mapping, index);
	if (!page)
		return -ENOENT;

	wait_on_page_writeback(page);

	first_block = (unsigned long)index <<
		(PAGE_CACHE_SHIFT - inode->i_blkbits);
	if (page_has_buffers(page)) {
		struct buffer_head *bh;

		bh = nilfs_page_get_nth_block(page, block - first_block);
		nilfs_forget_buffer(bh);
	}
	still_dirty = PageDirty(page);
	unlock_page(page);
	page_cache_release(page);

	if (still_dirty ||
	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
		ret = -EBUSY;
	return ret;
}

/**
 * nilfs_mdt_mark_block_dirty - mark a block on the meta data file dirty.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, it returns 0.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 *
 * %-EINVAL - bmap is broken. (the caller should call nilfs_error())
 */
int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
{
	struct buffer_head *bh;
	int err;

	err = nilfs_mdt_read_block(inode, block, &bh);
	if (unlikely(err))
		return err;
	nilfs_mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	brelse(bh);
	return 0;
}

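/*
 * Illustrative usage (not part of the original file): a caller that has
 * logically updated state recorded in a block it does not currently hold a
 * buffer for can use nilfs_mdt_mark_block_dirty() to force that block to be
 * included in the next segment construction.  'inode' and 'blkoff' are
 * assumed inputs from the caller.
 */
#if 0	/* usage sketch */
	err = nilfs_mdt_mark_block_dirty(inode, blkoff);
	if (unlikely(err))
		return err;	/* -ENOENT means the block is still a hole */
#endif
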
int nilfs_mdt_fetch_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) {
		set_bit(NILFS_I_DIRTY, &ii->i_state);
		return 1;
	}
	return test_bit(NILFS_I_DIRTY, &ii->i_state);
}

static int
nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = container_of(page->mapping,
					   struct inode, i_data);
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *writer = NULL;
	int err = 0;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (page->mapping->assoc_mapping)
		return 0; /* Do not request flush for shadow page cache */
	if (!sb) {
		writer = nilfs_get_writer(NILFS_MDT(inode)->mi_nilfs);
		if (!writer)
			return -EROFS;
		sb = writer->s_super;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_segment(sb);
	else if (wbc->for_reclaim)
		nilfs_flush_segment(sb, inode->i_ino);

	if (writer)
		nilfs_put_writer(NILFS_MDT(inode)->mi_nilfs);
	return err;
}

static struct address_space_operations def_mdt_aops = {
	.writepage		= nilfs_mdt_write_page,
	.sync_page		= block_sync_page,
};

static struct inode_operations def_mdt_iops;
static struct file_operations def_mdt_fops;

/*
 * NILFS2 uses pseudo inodes for meta data files such as DAT, cpfile, sufile,
 * ifile, or gcinodes.  This allows the B-tree code and segment constructor
 * to treat them like regular files, and this helps to simplify the
 * implementation.
 *   On the other hand, some of the pseudo inodes have an irregular point:
 * They don't have a valid inode->i_sb pointer because their lifetimes are
 * longer than those of the super block structs; they may continue over
 * several consecutive mounts/umounts.  This would need discussion.
 */
struct inode *
nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
		     ino_t ino, gfp_t gfp_mask)
{
	struct inode *inode = nilfs_alloc_inode_common(nilfs);

	if (!inode)
		return NULL;
	else {
		struct address_space * const mapping = &inode->i_data;
		struct nilfs_mdt_info *mi = kzalloc(sizeof(*mi), GFP_NOFS);

		if (!mi) {
			nilfs_destroy_inode(inode);
			return NULL;
		}
		mi->mi_nilfs = nilfs;
		init_rwsem(&mi->mi_sem);

		inode->i_sb = sb; /* sb may be NULL for some meta data files */
		inode->i_blkbits = nilfs->ns_blocksize_bits;
		inode->i_flags = 0;
		atomic_set(&inode->i_count, 1);
		inode->i_nlink = 1;
		inode->i_ino = ino;
		inode->i_mode = S_IFREG;
		inode->i_private = mi;

#ifdef INIT_UNUSED_INODE_FIELDS
		atomic_set(&inode->i_writecount, 0);
		inode->i_size = 0;
		inode->i_blocks = 0;
		inode->i_bytes = 0;
		inode->i_generation = 0;
#ifdef CONFIG_QUOTA
		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
		inode->i_pipe = NULL;
		inode->i_bdev = NULL;
		inode->i_cdev = NULL;
		inode->i_rdev = 0;
#ifdef CONFIG_SECURITY
		inode->i_security = NULL;
#endif
		inode->dirtied_when = 0;

		INIT_LIST_HEAD(&inode->i_list);
		INIT_LIST_HEAD(&inode->i_sb_list);
		inode->i_state = 0;
#endif

		spin_lock_init(&inode->i_lock);
		mutex_init(&inode->i_mutex);
		init_rwsem(&inode->i_alloc_sem);

		mapping->host = NULL;  /* instead of inode */
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, gfp_mask);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = nilfs->ns_bdi;

		inode->i_mapping = mapping;
	}

	return inode;
}

struct inode *nilfs_mdt_new(struct the_nilfs *nilfs, struct super_block *sb,
			    ino_t ino, gfp_t gfp_mask)
{
	struct inode *inode = nilfs_mdt_new_common(nilfs, sb, ino, gfp_mask);

	if (!inode)
		return NULL;

	inode->i_op = &def_mdt_iops;
	inode->i_fop = &def_mdt_fops;
	inode->i_mapping->a_ops = &def_mdt_aops;
	return inode;
}

void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
			      unsigned header_size)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);

	mi->mi_entry_size = entry_size;
	mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}

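/*
 * Illustrative usage (not part of the original file): a metadata file is
 * created with nilfs_mdt_new() and then told its record geometry.  With a
 * 4096-byte block size, 80-byte entries and a 64-byte header (sizes made up
 * for the example), mi_entries_per_block becomes 4096 / 80 = 51 and
 * mi_first_entry_offset becomes DIV_ROUND_UP(64, 80) = 1, i.e. the first
 * entry slot of block 0 is reserved for the header.  Real values come from
 * the on-disk format of each file.
 */
#if 0	/* usage sketch */
	struct inode *sufile;

	sufile = nilfs_mdt_new(nilfs, sb, NILFS_SUFILE_INO, GFP_NOFS);
	if (!sufile)
		return -ENOMEM;
	nilfs_mdt_set_entry_size(sufile, 80, 64);
#endif
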
void nilfs_mdt_set_shadow(struct inode *orig, struct inode *shadow)
{
	shadow->i_mapping->assoc_mapping = orig->i_mapping;
	NILFS_I(shadow)->i_btnode_cache.assoc_mapping =
		&NILFS_I(orig)->i_btnode_cache;
}

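/*
 * Illustrative note (not part of the original file): this pairs a shadow
 * metadata file with its original; for instance, a GC DAT inode can be
 * linked to the DAT with a call like the one sketched below, so that pages
 * flushed from the shadow are never written out directly (see the
 * assoc_mapping check in nilfs_mdt_write_page()).
 */
#if 0	/* usage sketch */
	nilfs_mdt_set_shadow(dat, gcdat);
#endif
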
void nilfs_mdt_clear(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	invalidate_mapping_pages(inode->i_mapping, 0, -1);
	truncate_inode_pages(inode->i_mapping, 0);

	nilfs_bmap_clear(ii->i_bmap);
	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
}

void nilfs_mdt_destroy(struct inode *inode)
{
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
	kfree(mdi);
	nilfs_destroy_inode(inode);
}