/*
 * gcinode.c - dummy inodes to buffer blocks for garbage collection
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Seiji Kihara <kihara@osrg.net>, Amagai Yoshiji <amagai@osrg.net>,
 * and Ryusuke Konishi <ryusuke@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 *
 * This file adds the cache of on-disk blocks to be moved in garbage
 * collection.  The disk blocks are held with dummy inodes (called
 * gcinodes), and this file provides the lookup function of the dummy
 * inodes and their buffer read function.
 *
 * Since NILFS2 keeps multiple checkpoints/snapshots across GC, it has
 * to handle blocks that belong to the same file but have different
 * checkpoint numbers.  To avoid interference among generations, dummy
 * inodes are managed separately from actual inodes, and their lookup
 * function (nilfs_gc_iget) takes a checkpoint number argument as well
 * as an inode number.
 *
 * Buffers and pages held by the dummy inodes are released each time
 * after they are copied to a new log.  Dirty blocks made on the
 * current generation and the blocks to be moved by GC never overlap,
 * because the dirty blocks make a new generation; rather, they must
 * be written individually.
 */
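
/*
 * Typical calling sequence (an illustrative sketch only, not part of the
 * original code; the struct nilfs_vdesc field names below are assumptions
 * about how the GC ioctl path drives these helpers):
 *
 *	inode = nilfs_gc_iget(nilfs, vdesc->vd_ino, vdesc->vd_cno);
 *	if (the descriptor refers to a data block)
 *		err = nilfs_gccache_submit_read_data(inode, vdesc->vd_offset,
 *						     vdesc->vd_blocknr,
 *						     vdesc->vd_vblocknr, &bh);
 *	else
 *		err = nilfs_gccache_submit_read_node(inode, vdesc->vd_blocknr,
 *						     vdesc->vd_vblocknr, &bh);
 *	...
 *	err = nilfs_gccache_wait_and_mark_dirty(bh);
 *
 * Blocks marked dirty this way are then picked up by the segment
 * constructor and written into a new log.
 */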
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"
#include "dat.h"
#include "ifile.h"
static const struct address_space_operations def_gcinode_aops = {
	.sync_page		= block_sync_page,
};
/*
 * nilfs_gccache_submit_read_data() - add data buffer and submit read request
 * @inode - gc inode
 * @blkoff - dummy offset treated as the key for the page cache
 * @pbn - physical block number of the block
 * @vbn - virtual block number of the block, 0 for a non-virtual block
 * @out_bh - indirect pointer to a buffer_head struct to receive the results
 *
 * Description: nilfs_gccache_submit_read_data() registers the data buffer
 * specified by @pbn to the GC pagecache with the key @blkoff.
 * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The block specified with @pbn does not exist.
 */
int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
				   sector_t pbn, __u64 vbn,
				   struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	int err;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		return -ENOMEM;

	if (buffer_uptodate(bh))
		goto out;

	if (pbn == 0) {
		struct inode *dat_inode = NILFS_I_NILFS(inode)->ns_dat;
					  /* use original dat, not gc dat. */
		err = nilfs_dat_translate(dat_inode, vbn, &pbn);
		if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
			brelse(bh);
			goto failed;
		}
	}

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}

	if (!buffer_mapped(bh)) {
		bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
		set_buffer_mapped(bh);
	}
	bh->b_blocknr = pbn;
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(READ, bh);
	if (vbn)
		bh->b_blocknr = vbn;
 out:
	err = 0;
	*out_bh = bh;

 failed:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	return err;
}
/*
 * nilfs_gccache_submit_read_node() - add node buffer and submit read request
 * @inode - gc inode
 * @pbn - physical block number for the block
 * @vbn - virtual block number for the block
 * @out_bh - indirect pointer to a buffer_head struct to receive the results
 *
 * Description: nilfs_gccache_submit_read_node() registers the node buffer
 * specified by @vbn to the GC pagecache.  @pbn can be supplied by the
 * caller to avoid translation of the disk block address.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
				   __u64 vbn, struct buffer_head **out_bh)
{
	int ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
					    vbn ? : pbn, pbn, out_bh);
	if (ret == -EEXIST) /* internal code (cache hit) */
		ret = 0;
	return ret;
}
int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
{
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		return -EIO;
	if (buffer_dirty(bh))
		return -EEXIST;

	if (buffer_nilfs_node(bh))
		nilfs_btnode_mark_dirty(bh);
	else
		nilfs_mdt_mark_buffer_dirty(bh);
	return 0;
}
/*
 * nilfs_init_gccache() - allocate and initialize gc_inode hash table
 * @nilfs - the_nilfs
 *
 * Return Value: On success, 0.
 * On error, a negative error code is returned.
 */
int nilfs_init_gccache(struct the_nilfs *nilfs)
{
	int loop;

	BUG_ON(nilfs->ns_gc_inodes_h);

	INIT_LIST_HEAD(&nilfs->ns_gc_inodes);

	nilfs->ns_gc_inodes_h =
		kmalloc(sizeof(struct hlist_head) * NILFS_GCINODE_HASH_SIZE,
			GFP_NOFS);
	if (nilfs->ns_gc_inodes_h == NULL)
		return -ENOMEM;

	for (loop = 0; loop < NILFS_GCINODE_HASH_SIZE; loop++)
		INIT_HLIST_HEAD(&nilfs->ns_gc_inodes_h[loop]);
	return 0;
}
/*
 * nilfs_destroy_gccache() - free gc_inode hash table
 * @nilfs - the_nilfs
 */
void nilfs_destroy_gccache(struct the_nilfs *nilfs)
{
	if (nilfs->ns_gc_inodes_h) {
		nilfs_remove_all_gcinode(nilfs);
		kfree(nilfs->ns_gc_inodes_h);
		nilfs->ns_gc_inodes_h = NULL;
	}
}
static struct inode *alloc_gcinode(struct the_nilfs *nilfs, ino_t ino,
				   __u64 cno)
{
	struct inode *inode;
	struct nilfs_inode_info *ii;

	inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS, 0);
	if (!inode)
		return NULL;

	inode->i_op = NULL;
	inode->i_fop = NULL;
	inode->i_mapping->a_ops = &def_gcinode_aops;

	ii = NILFS_I(inode);
	ii->i_cno = cno;
	ii->i_flags = 0;
	ii->i_state = 1 << NILFS_I_GCINODE;
	ii->i_bh = NULL;
	nilfs_bmap_init_gc(ii->i_bmap);

	return inode;
}
static unsigned long ihash(ino_t ino, __u64 cno)
{
	return hash_long((unsigned long)((ino << 2) + cno),
			 NILFS_GCINODE_HASH_BITS);
}
/*
 * nilfs_gc_iget() - find or create gc inode with specified (ino,cno)
 */
struct inode *nilfs_gc_iget(struct the_nilfs *nilfs, ino_t ino, __u64 cno)
{
	struct hlist_head *head = nilfs->ns_gc_inodes_h + ihash(ino, cno);
	struct hlist_node *node;
	struct inode *inode;

	hlist_for_each_entry(inode, node, head, i_hash) {
		if (inode->i_ino == ino && NILFS_I(inode)->i_cno == cno)
			return inode;
	}

	inode = alloc_gcinode(nilfs, ino, cno);
	if (likely(inode)) {
		hlist_add_head(&inode->i_hash, head);
		list_add(&NILFS_I(inode)->i_dirty, &nilfs->ns_gc_inodes);
	}
	return inode;
}
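
/*
 * Example (illustrative only, based on the lookup above): blocks of inode
 * #42 captured at checkpoint 5 and at checkpoint 7 are buffered through two
 * distinct gc inodes, so their pages never collide in one mapping:
 *
 *	i1 = nilfs_gc_iget(nilfs, 42, 5);
 *	i2 = nilfs_gc_iget(nilfs, 42, 7);	(i2 != i1)
 *	i3 = nilfs_gc_iget(nilfs, 42, 5);	(i3 == i1, found in the hash)
 */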
/*
 * nilfs_clear_gcinode() - clear and free a gc inode
 */
void nilfs_clear_gcinode(struct inode *inode)
{
	nilfs_mdt_destroy(inode);
}
/*
 * nilfs_remove_all_gcinode() - remove all gc inodes from the_nilfs
 */
void nilfs_remove_all_gcinode(struct the_nilfs *nilfs)
{
	struct hlist_head *head = nilfs->ns_gc_inodes_h;
	struct hlist_node *node, *n;
	struct inode *inode;
	int loop;

	for (loop = 0; loop < NILFS_GCINODE_HASH_SIZE; loop++, head++) {
		hlist_for_each_entry_safe(inode, node, n, head, i_hash) {
			hlist_del_init(&inode->i_hash);
			list_del_init(&NILFS_I(inode)->i_dirty);
			nilfs_clear_gcinode(inode); /* might sleep */
		}
	}
}