/*
 * segbuf.c - NILFS segment buffer
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include "page.h"
#include "segbuf.h"
static struct kmem_cache *nilfs_segbuf_cachep;

static void nilfs_segbuf_init_once(void *obj)
{
	memset(obj, 0, sizeof(struct nilfs_segment_buffer));
}

int __init nilfs_init_segbuf_cache(void)
{
	nilfs_segbuf_cachep =
		kmem_cache_create("nilfs2_segbuf_cache",
				  sizeof(struct nilfs_segment_buffer),
				  0, SLAB_RECLAIM_ACCOUNT,
				  nilfs_segbuf_init_once);

	return (nilfs_segbuf_cachep == NULL) ? -ENOMEM : 0;
}

void nilfs_destroy_segbuf_cache(void)
{
	kmem_cache_destroy(nilfs_segbuf_cachep);
}
struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
	struct nilfs_segment_buffer *segbuf;

	segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
	if (unlikely(!segbuf))
		return NULL;

	segbuf->sb_super = sb;
	INIT_LIST_HEAD(&segbuf->sb_list);
	INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
	INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
	return segbuf;
}

void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
	kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}
void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
		      unsigned long offset, struct the_nilfs *nilfs)
{
	segbuf->sb_segnum = segnum;
	nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
				&segbuf->sb_fseg_end);

	segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
				  __u64 nextnum, struct the_nilfs *nilfs)
{
	segbuf->sb_nextnum = nextnum;
	segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}
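
/*
 * Illustrative sketch, not from the original source: the minimal call
 * sequence for obtaining a segment buffer and mapping it onto segment
 * @segnum.  The caller-side variables (sb, nilfs, segnum, nextnum) are
 * assumptions; only functions defined in this file are used.  Compiled
 * out with #if 0.
 */
#if 0
static void nilfs_segbuf_setup_sketch(struct super_block *sb,
				      struct the_nilfs *nilfs,
				      __u64 segnum, __u64 nextnum)
{
	struct nilfs_segment_buffer *segbuf;

	segbuf = nilfs_segbuf_new(sb);
	if (unlikely(!segbuf))
		return;

	/* map to the full segment @segnum, starting at block offset 0 */
	nilfs_segbuf_map(segbuf, segnum, 0, nilfs);
	/* record the start block of the segment that will follow */
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	nilfs_segbuf_free(segbuf);
}
#endif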
int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_segsum_buffer(segbuf, bh);
	return 0;
}

int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
				struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_payload_buffer(segbuf, bh);
	*bhp = bh;
	return 0;
}
int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
		       time_t ctime)
{
	int err;

	segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
	err = nilfs_segbuf_extend_segsum(segbuf);
	if (unlikely(err))
		return err;

	segbuf->sb_sum.flags = flags;
	segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
	segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
	segbuf->sb_sum.ctime = ctime;

	segbuf->sb_io_error = 0;
	return 0;
}
/*
 * Setup segment summary
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct nilfs_segment_summary *raw_sum;
	struct buffer_head *bh_sum;

	bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
			    struct buffer_head, b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

	raw_sum->ss_magic    = cpu_to_le32(NILFS_SEGSUM_MAGIC);
	raw_sum->ss_bytes    = cpu_to_le16(sizeof(*raw_sum));
	raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
	raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
	raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
	raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
	raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
	raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
	raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
	raw_sum->ss_pad      = 0;
}
/*
 * CRC calculation routines
 */
void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
				     u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	unsigned long size, bytes = segbuf->sb_sum.sumbytes;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);

	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	size = min_t(unsigned long, bytes, bh->b_size);
	/* skip the two checksum fields at the head of the summary */
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum +
		       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
		       size - (sizeof(raw_sum->ss_datasum) +
			       sizeof(raw_sum->ss_sumsum)));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		bytes -= size;
		size = min_t(unsigned long, bytes, bh->b_size);
		crc = crc32_le(crc, bh->b_data, size);
	}
	raw_sum->ss_sumsum = cpu_to_le32(crc);
}
void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
				   u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	void *kaddr;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
		       bh->b_size - sizeof(raw_sum->ss_datasum));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		crc = crc32_le(crc, bh->b_data, bh->b_size);
	}
	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		kaddr = kmap_atomic(bh->b_page, KM_USER0);
		crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
		kunmap_atomic(kaddr, KM_USER0);
	}
	raw_sum->ss_datasum = cpu_to_le32(crc);
}
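
/*
 * Illustrative sketch, not from the original source: the data CRC above
 * covers everything after ss_datasum, including the ss_sumsum field, so
 * the segment summary CRC is presumably filled in first.  "seed" is
 * assumed to be the CRC seed kept by the caller.  Compiled out with #if 0.
 */
#if 0
static void nilfs_segbuf_checksum_sketch(struct nilfs_segment_buffer *segbuf,
					 u32 seed)
{
	nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);	/* sets ss_sumsum */
	nilfs_segbuf_fill_in_data_crc(segbuf, seed);	/* sets ss_datasum */
}
#endif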
void nilfs_release_buffers(struct list_head *list)
{
	struct buffer_head *bh, *n;

	list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
		list_del_init(&bh->b_assoc_buffers);
		if (buffer_nilfs_allocated(bh)) {
			struct page *clone_page = bh->b_page;

			/* remove clone page */
			brelse(bh);
			page_cache_release(clone_page); /* for each bh */
			if (page_count(clone_page) <= 2) {
				lock_page(clone_page);
				nilfs_free_private_page(clone_page);
			}
			continue;
		}
		brelse(bh);
	}
}
/*
 * BIO operations
 */
static void nilfs_end_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nilfs_write_info *wi = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		bio_put(bio);
		/* to be detected by submit_seg_bio() */
	}

	if (!uptodate)
		atomic_inc(&wi->err);

	bio_put(bio);
	complete(&wi->bio_event);
}
static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
{
	struct bio *bio = wi->bio;
	int err;

	if (wi->nbio > 0 && bdi_write_congested(wi->bdi)) {
		/* throttle: wait for an in-flight BIO before queuing more */
		wait_for_completion(&wi->bio_event);
		wi->nbio--;
		if (unlikely(atomic_read(&wi->err))) {
			bio_put(bio);
			err = -EIO;
			goto failed;
		}
	}

	bio->bi_end_io = nilfs_end_bio_write;
	bio->bi_private = wi;
	bio_get(bio);
	submit_bio(mode, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
		bio_put(bio);
		err = -EOPNOTSUPP;
		goto failed;
	}
	wi->nbio++;
	bio_put(bio);

	wi->bio = NULL;
	wi->rest_blocks -= wi->end - wi->start;
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end;
	return 0;

 failed:
	wi->bio = NULL;
	return err;
}
/**
 * nilfs_alloc_seg_bio - allocate a bio for writing segment
 * @sb: super block
 * @start: beginning disk block number of this BIO.
 * @nr_vecs: request size of page vector.
 *
 * alloc_seg_bio() allocates a new BIO structure and initializes it.
 *
 * Return Value: On success, pointer to the struct bio is returned.
 * On error, NULL is returned.
 */
static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
				       int nr_vecs)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOWAIT, nr_vecs);
	if (bio == NULL) {
		/* retry with progressively smaller vector sizes */
		while (!bio && (nr_vecs >>= 1))
			bio = bio_alloc(GFP_NOWAIT, nr_vecs);
	}
	if (likely(bio)) {
		bio->bi_bdev = sb->s_bdev;
		bio->bi_sector = (sector_t)start << (sb->s_blocksize_bits - 9);
	}
	return bio;
}
void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
				struct nilfs_write_info *wi)
{
	wi->bio = NULL;
	wi->rest_blocks = segbuf->sb_sum.nblocks;
	wi->max_pages = bio_get_nr_vecs(wi->sb->s_bdev);
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end = 0;
	wi->nbio = 0;
	wi->blocknr = segbuf->sb_pseg_start;

	atomic_set(&wi->err, 0);
	init_completion(&wi->bio_event);
}
static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
			   int mode)
{
	int len, err;

	BUG_ON(wi->nr_vecs <= 0);
 repeat:
	if (!wi->bio) {
		wi->bio = nilfs_alloc_seg_bio(wi->sb, wi->blocknr + wi->end,
					      wi->nr_vecs);
		if (unlikely(!wi->bio))
			return -ENOMEM;
	}

	len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (len == bh->b_size) {
		wi->end++;
		return 0;
	}
	/* bio is FULL */
	err = nilfs_submit_seg_bio(wi, mode);
	/* never submit current bh */
	if (likely(!err))
		goto repeat;
	return err;
}
int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
		       struct nilfs_write_info *wi)
{
	struct buffer_head *bh;
	int res, rw = WRITE;

	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
		res = nilfs_submit_bh(wi, bh, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		res = nilfs_submit_bh(wi, bh, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	if (wi->bio) {
		/*
		 * Last BIO is always sent through the following
		 * submission.
		 */
		rw |= (1 << BIO_RW_SYNCIO);
		res = nilfs_submit_seg_bio(wi, rw);
		if (unlikely(res))
			goto failed_bio;
	}

	res = 0;
 out:
	return res;

 failed_bio:
	atomic_inc(&wi->err);
	goto out;
}
/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 * @wi: nilfs_write_info
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 */
int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf,
		      struct nilfs_write_info *wi)
{
	int err = 0;

	if (!wi->nbio)
		return 0;

	do {
		wait_for_completion(&wi->bio_event);
	} while (--wi->nbio > 0);

	if (unlikely(atomic_read(&wi->err) > 0)) {
		printk(KERN_ERR "NILFS: IO error writing segment\n");
		err = -EIO;
		segbuf->sb_io_error = 1;
	}
	return err;
}
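
/*
 * Illustrative sketch, not from the original source: the expected write
 * pipeline for a fully built segment buffer.  Error handling is reduced
 * to the minimum, and the nilfs_write_info fields (sb, bdi) are assumed
 * to be set up by the caller.  Compiled out with #if 0.
 */
#if 0
static int nilfs_segbuf_write_sketch(struct nilfs_segment_buffer *segbuf,
				     struct nilfs_write_info *wi)
{
	nilfs_segbuf_prepare_write(segbuf, wi);
	nilfs_segbuf_write(segbuf, wi);		/* submit the BIOs */
	return nilfs_segbuf_wait(segbuf, wi);	/* reap completions */
}
#endif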