nilfs2: hide nilfs_write_info struct in segment buffer code
fs/nilfs2/segbuf.c
/*
 * segbuf.c - NILFS segment buffer
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include <linux/backing-dev.h>
#include "page.h"
#include "segbuf.h"
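
/*
 * nilfs_write_info - on-stack context used while building and
 * submitting the BIOs of one log write.  It is private to this file
 * (see the commit subject above); only nilfs_segbuf_write() and its
 * helpers below touch it.
 */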
struct nilfs_write_info {
	struct the_nilfs	*nilfs;
	struct bio		*bio;
	int			start, end; /* The region to be submitted */
	int			rest_blocks;
	int			max_pages;
	int			nr_vecs;
	sector_t		blocknr;
};

static struct kmem_cache *nilfs_segbuf_cachep;

static void nilfs_segbuf_init_once(void *obj)
{
	memset(obj, 0, sizeof(struct nilfs_segment_buffer));
}

int __init nilfs_init_segbuf_cache(void)
{
	nilfs_segbuf_cachep =
		kmem_cache_create("nilfs2_segbuf_cache",
				  sizeof(struct nilfs_segment_buffer),
				  0, SLAB_RECLAIM_ACCOUNT,
				  nilfs_segbuf_init_once);

	return (nilfs_segbuf_cachep == NULL) ? -ENOMEM : 0;
}

void nilfs_destroy_segbuf_cache(void)
{
	kmem_cache_destroy(nilfs_segbuf_cachep);
}
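
/**
 * nilfs_segbuf_new - allocate and initialize a segment buffer
 * @sb: super block instance the buffer belongs to
 *
 * Return Value: On success, pointer to the new segment buffer is
 * returned.  On allocation failure, NULL is returned.
 */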
struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
	struct nilfs_segment_buffer *segbuf;

	segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
	if (unlikely(!segbuf))
		return NULL;

	segbuf->sb_super = sb;
	INIT_LIST_HEAD(&segbuf->sb_list);
	INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
	INIT_LIST_HEAD(&segbuf->sb_payload_buffers);

	init_completion(&segbuf->sb_bio_event);
	atomic_set(&segbuf->sb_err, 0);
	segbuf->sb_nbio = 0;

	return segbuf;
}

void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
	kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}
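
/*
 * nilfs_segbuf_map - map a segment buffer onto a physical segment.
 * @segnum selects the full segment; @offset is the block offset of the
 * partial segment within it.  sb_rest_blocks is the room left between
 * the partial segment start and the end of the full segment.
 */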
void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
		      unsigned long offset, struct the_nilfs *nilfs)
{
	segbuf->sb_segnum = segnum;
	nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
				&segbuf->sb_fseg_end);

	segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
				  __u64 nextnum, struct the_nilfs *nilfs)
{
	segbuf->sb_nextnum = nextnum;
	segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}
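
/*
 * The two extenders below each grab one more disk block with
 * sb_getblk(): nilfs_segbuf_extend_segsum() grows the segment summary
 * area, and nilfs_segbuf_extend_payload() appends a payload block.
 */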
int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_segsum_buffer(segbuf, bh);
	return 0;
}

int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
				struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_payload_buffer(segbuf, bh);
	*bhp = bh;
	return 0;
}
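
/**
 * nilfs_segbuf_reset - reset a segment buffer for reuse
 * @segbuf: segment buffer to be reset
 * @flags: log flags to be set in the segment summary
 * @ctime: creation time of the log
 *
 * Clears the block counters, allocates a fresh summary block, and
 * reinitializes the in-core segment summary.
 *
 * Return Value: 0 on success, or %-ENOMEM if the summary block could
 * not be allocated.
 */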
int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
		       time_t ctime)
{
	int err;

	segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
	err = nilfs_segbuf_extend_segsum(segbuf);
	if (unlikely(err))
		return err;

	segbuf->sb_sum.flags = flags;
	segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
	segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
	segbuf->sb_sum.ctime = ctime;
	return 0;
}
/*
 * Set up segment summary
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct nilfs_segment_summary *raw_sum;
	struct buffer_head *bh_sum;

	bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
			    struct buffer_head, b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

	raw_sum->ss_magic    = cpu_to_le32(NILFS_SEGSUM_MAGIC);
	raw_sum->ss_bytes    = cpu_to_le16(sizeof(*raw_sum));
	raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
	raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
	raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
	raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
	raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
	raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
	raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
	raw_sum->ss_pad      = 0;
}
/*
 * CRC calculation routines
 */
void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
				     u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	unsigned long size, bytes = segbuf->sb_sum.sumbytes;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);

	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	size = min_t(unsigned long, bytes, bh->b_size);

	/* Skip the two checksum fields themselves in the first block */
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum +
		       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
		       size - (sizeof(raw_sum->ss_datasum) +
			       sizeof(raw_sum->ss_sumsum)));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		bytes -= size;
		size = min_t(unsigned long, bytes, bh->b_size);
		crc = crc32_le(crc, bh->b_data, size);
	}
	raw_sum->ss_sumsum = cpu_to_le32(crc);
}
void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
				   u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	void *kaddr;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
		       bh->b_size - sizeof(raw_sum->ss_datasum));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		crc = crc32_le(crc, bh->b_data, bh->b_size);
	}
	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	raw_sum->ss_datasum = cpu_to_le32(crc);
}
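
/*
 * nilfs_release_buffers - detach and release the buffer heads on @list.
 * For buffers allocated on private clone pages, the page reference
 * taken for each buffer is dropped as well, and the clone page itself
 * is freed once no other buffer on it is still in use.
 */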
void nilfs_release_buffers(struct list_head *list)
{
	struct buffer_head *bh, *n;

	list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
		list_del_init(&bh->b_assoc_buffers);
		if (buffer_nilfs_allocated(bh)) {
			struct page *clone_page = bh->b_page;

			/* remove clone page */
			brelse(bh);
			page_cache_release(clone_page); /* for each bh */
			if (page_count(clone_page) <= 2) {
				lock_page(clone_page);
				nilfs_free_private_page(clone_page);
			}
			continue;
		}
		brelse(bh);
	}
}
/*
 * BIO operations
 */
static void nilfs_end_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nilfs_segment_buffer *segbuf = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		bio_put(bio);
		/* to be detected by nilfs_segbuf_submit_bio() */
	}

	if (!uptodate)
		atomic_inc(&segbuf->sb_err);

	bio_put(bio);
	complete(&segbuf->sb_bio_event);
}
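
/*
 * nilfs_segbuf_submit_bio - submit the bio held in @wi and advance the
 * write context to the next region.  If the backing device is
 * congested and at least one bio is already in flight, wait for a
 * completion first so that writes are throttled.
 */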
static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
				   struct nilfs_write_info *wi, int mode)
{
	struct bio *bio = wi->bio;
	int err;

	if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->nilfs->ns_bdi)) {
		wait_for_completion(&segbuf->sb_bio_event);
		segbuf->sb_nbio--;
		if (unlikely(atomic_read(&segbuf->sb_err))) {
			bio_put(bio);
			err = -EIO;
			goto failed;
		}
	}

	bio->bi_end_io = nilfs_end_bio_write;
	bio->bi_private = segbuf;
	bio_get(bio);
	submit_bio(mode, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
		bio_put(bio);
		err = -EOPNOTSUPP;
		goto failed;
	}
	segbuf->sb_nbio++;
	bio_put(bio);

	wi->bio = NULL;
	wi->rest_blocks -= wi->end - wi->start;
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end;
	return 0;

 failed:
	wi->bio = NULL;
	return err;
}
/**
 * nilfs_alloc_seg_bio - allocate a new bio for writing log
 * @nilfs: nilfs object
 * @start: start block number of the bio
 * @nr_vecs: request size of page vector.
 *
 * Return Value: On success, pointer to the struct bio is returned.
 * On error, NULL is returned.
 */
static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
				       int nr_vecs)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, nr_vecs);
	if (bio == NULL) {
		/* Retry with smaller and smaller vectors on failure */
		while (!bio && (nr_vecs >>= 1))
			bio = bio_alloc(GFP_NOIO, nr_vecs);
	}
	if (likely(bio)) {
		bio->bi_bdev = nilfs->ns_bdev;
		bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
	}
	return bio;
}
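
/*
 * nilfs_segbuf_prepare_write - initialize the write context in @wi for
 * @segbuf: no bio yet, all blocks of the log still to be submitted,
 * and the vector size capped by what the block device accepts.
 */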
static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
				       struct nilfs_write_info *wi)
{
	wi->bio = NULL;
	wi->rest_blocks = segbuf->sb_sum.nblocks;
	wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end = 0;
	wi->blocknr = segbuf->sb_pseg_start;
}
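
/*
 * nilfs_segbuf_submit_bh - add the page of @bh to the current bio,
 * allocating one on demand.  When the bio is full, it is submitted and
 * the buffer is retried on a fresh bio, so @bh always ends up queued.
 */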
static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
				  struct nilfs_write_info *wi,
				  struct buffer_head *bh, int mode)
{
	int len, err;

	BUG_ON(wi->nr_vecs <= 0);
 repeat:
	if (!wi->bio) {
		wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
					      wi->nr_vecs);
		if (unlikely(!wi->bio))
			return -ENOMEM;
	}

	len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (len == bh->b_size) {
		wi->end++;
		return 0;
	}
	/* bio is FULL */
	err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
	/* never submit current bh */
	if (likely(!err))
		goto repeat;
	return err;
}
/**
 * nilfs_segbuf_write - submit write requests of a log
 * @segbuf: buffer storing a log to be written
 * @nilfs: nilfs object
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
		       struct the_nilfs *nilfs)
{
	struct nilfs_write_info wi;
	struct buffer_head *bh;
	int res = 0, rw = WRITE;

	wi.nilfs = nilfs;
	nilfs_segbuf_prepare_write(segbuf, &wi);

	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	if (wi.bio) {
		/*
		 * Last BIO is always sent through the following
		 * submission.
		 */
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
		res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
	}

 failed_bio:
	return res;
}
/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 */
int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
	int err = 0;

	if (!segbuf->sb_nbio)
		return 0;

	do {
		wait_for_completion(&segbuf->sb_bio_event);
	} while (--segbuf->sb_nbio > 0);

	if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
		printk(KERN_ERR "NILFS: IO error writing segment\n");
		err = -EIO;
	}
	return err;
}