/*
 * segbuf.c - NILFS segment buffer
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include <linux/backing-dev.h>
#include "page.h"
#include "segbuf.h"

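/*
 * struct nilfs_write_info - context used while building and submitting
 * write BIOs for a single segment buffer (log).
 */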
struct nilfs_write_info {
        struct the_nilfs       *nilfs;
        struct bio             *bio;
        int                     start, end; /* The region to be submitted */
        int                     rest_blocks;
        int                     max_pages;
        int                     nr_vecs;
        sector_t                blocknr;
};

static struct kmem_cache *nilfs_segbuf_cachep;

static void nilfs_segbuf_init_once(void *obj)
{
        memset(obj, 0, sizeof(struct nilfs_segment_buffer));
}

int __init nilfs_init_segbuf_cache(void)
{
        nilfs_segbuf_cachep =
                kmem_cache_create("nilfs2_segbuf_cache",
                                  sizeof(struct nilfs_segment_buffer),
                                  0, SLAB_RECLAIM_ACCOUNT,
                                  nilfs_segbuf_init_once);

        return (nilfs_segbuf_cachep == NULL) ? -ENOMEM : 0;
}

void nilfs_destroy_segbuf_cache(void)
{
        kmem_cache_destroy(nilfs_segbuf_cachep);
}

struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
        struct nilfs_segment_buffer *segbuf;

        segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
        if (unlikely(!segbuf))
                return NULL;

        segbuf->sb_super = sb;
        INIT_LIST_HEAD(&segbuf->sb_list);
        INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
        INIT_LIST_HEAD(&segbuf->sb_payload_buffers);

        init_completion(&segbuf->sb_bio_event);
        atomic_set(&segbuf->sb_err, 0);
        segbuf->sb_nbio = 0;

        return segbuf;
}

void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
        kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}

void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
                      unsigned long offset, struct the_nilfs *nilfs)
{
        segbuf->sb_segnum = segnum;
        nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
                                &segbuf->sb_fseg_end);

        segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
        segbuf->sb_rest_blocks =
                segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

/**
 * nilfs_segbuf_map_cont - map a new log behind a given log
 * @segbuf: new segment buffer
 * @prev: segment buffer containing a log to be continued
 */
void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
                           struct nilfs_segment_buffer *prev)
{
        segbuf->sb_segnum = prev->sb_segnum;
        segbuf->sb_fseg_start = prev->sb_fseg_start;
        segbuf->sb_fseg_end = prev->sb_fseg_end;
        segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
        segbuf->sb_rest_blocks =
                segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
                                  __u64 nextnum, struct the_nilfs *nilfs)
{
        segbuf->sb_nextnum = nextnum;
        segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}

int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
        struct buffer_head *bh;

        bh = sb_getblk(segbuf->sb_super,
                       segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
        if (unlikely(!bh))
                return -ENOMEM;

        nilfs_segbuf_add_segsum_buffer(segbuf, bh);
        return 0;
}

int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
                                struct buffer_head **bhp)
{
        struct buffer_head *bh;

        bh = sb_getblk(segbuf->sb_super,
                       segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
        if (unlikely(!bh))
                return -ENOMEM;

        nilfs_segbuf_add_payload_buffer(segbuf, bh);
        *bhp = bh;
        return 0;
}

int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
                       time_t ctime)
{
        int err;

        segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
        err = nilfs_segbuf_extend_segsum(segbuf);
        if (unlikely(err))
                return err;

        segbuf->sb_sum.flags = flags;
        segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
        segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
        segbuf->sb_sum.ctime = ctime;
        return 0;
}

/*
 * Set up segment summary
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
        struct nilfs_segment_summary *raw_sum;
        struct buffer_head *bh_sum;

        bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
                            struct buffer_head, b_assoc_buffers);
        raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

        raw_sum->ss_magic    = cpu_to_le32(NILFS_SEGSUM_MAGIC);
        raw_sum->ss_bytes    = cpu_to_le16(sizeof(*raw_sum));
        raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
        raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
        raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
        raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
        raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
        raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
        raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
        raw_sum->ss_pad      = 0;
}

/*
 * CRC calculation routines
 */
void nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf,
                                     u32 seed)
{
        struct buffer_head *bh;
        struct nilfs_segment_summary *raw_sum;
        unsigned long size, bytes = segbuf->sb_sum.sumbytes;
        u32 crc;

        bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
                        b_assoc_buffers);

        raw_sum = (struct nilfs_segment_summary *)bh->b_data;
        size = min_t(unsigned long, bytes, bh->b_size);
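        /*
         * The leading checksum fields of the summary (ss_datasum and
         * ss_sumsum) are excluded from the checksummed area.
         */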
        crc = crc32_le(seed,
                       (unsigned char *)raw_sum +
                       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
                       size - (sizeof(raw_sum->ss_datasum) +
                               sizeof(raw_sum->ss_sumsum)));

        list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
                                     b_assoc_buffers) {
                bytes -= size;
                size = min_t(unsigned long, bytes, bh->b_size);
                crc = crc32_le(crc, bh->b_data, size);
        }
        raw_sum->ss_sumsum = cpu_to_le32(crc);
}

void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
                                   u32 seed)
{
        struct buffer_head *bh;
        struct nilfs_segment_summary *raw_sum;
        void *kaddr;
        u32 crc;

        bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
                        b_assoc_buffers);
        raw_sum = (struct nilfs_segment_summary *)bh->b_data;
        crc = crc32_le(seed,
                       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
                       bh->b_size - sizeof(raw_sum->ss_datasum));

        list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
                                     b_assoc_buffers) {
                crc = crc32_le(crc, bh->b_data, bh->b_size);
        }
        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
                kaddr = kmap_atomic(bh->b_page, KM_USER0);
                crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
                kunmap_atomic(kaddr, KM_USER0);
        }
        raw_sum->ss_datasum = cpu_to_le32(crc);
}

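/*
 * nilfs_release_buffers - detach and release the buffer heads on @list.
 * Buffers placed on nilfs-private clone pages also drop the per-buffer
 * page reference, and the private page is freed when it is no longer
 * in use.
 */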
static void nilfs_release_buffers(struct list_head *list)
{
        struct buffer_head *bh, *n;

        list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
                list_del_init(&bh->b_assoc_buffers);
                if (buffer_nilfs_allocated(bh)) {
                        struct page *clone_page = bh->b_page;

                        /* remove clone page */
                        brelse(bh);
                        page_cache_release(clone_page); /* for each bh */
                        if (page_count(clone_page) <= 2) {
                                lock_page(clone_page);
                                nilfs_free_private_page(clone_page);
                        }
                        continue;
                }
                brelse(bh);
        }
}

static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
{
        nilfs_release_buffers(&segbuf->sb_segsum_buffers);
        nilfs_release_buffers(&segbuf->sb_payload_buffers);
}

/*
 * Iterators for segment buffers
 */
void nilfs_clear_logs(struct list_head *logs)
{
        struct nilfs_segment_buffer *segbuf;

        list_for_each_entry(segbuf, logs, sb_list)
                nilfs_segbuf_clear(segbuf);
}

void nilfs_truncate_logs(struct list_head *logs,
                         struct nilfs_segment_buffer *last)
{
        struct nilfs_segment_buffer *n, *segbuf;

        segbuf = list_prepare_entry(last, logs, sb_list);
        list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
                list_del_init(&segbuf->sb_list);
                nilfs_segbuf_clear(segbuf);
                nilfs_segbuf_free(segbuf);
        }
}

int nilfs_wait_on_logs(struct list_head *logs)
{
        struct nilfs_segment_buffer *segbuf;
        int err;

        list_for_each_entry(segbuf, logs, sb_list) {
                err = nilfs_segbuf_wait(segbuf);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * BIO operations
 */
static void nilfs_end_bio_write(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct nilfs_segment_buffer *segbuf = bio->bi_private;

        if (err == -EOPNOTSUPP) {
                set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                bio_put(bio);
                /* to be detected by submit_seg_bio() */
        }

        if (!uptodate)
                atomic_inc(&segbuf->sb_err);

        bio_put(bio);
        complete(&segbuf->sb_bio_event);
}

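/*
 * nilfs_segbuf_submit_bio - submit the BIO held in @wi and advance the
 * write context to the next region.  If the backing device is congested
 * and earlier BIOs are still in flight, wait for one of them to complete
 * before submitting.
 */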
static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
                                   struct nilfs_write_info *wi, int mode)
{
        struct bio *bio = wi->bio;
        int err;

        if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->nilfs->ns_bdi)) {
                wait_for_completion(&segbuf->sb_bio_event);
                segbuf->sb_nbio--;
                if (unlikely(atomic_read(&segbuf->sb_err))) {
                        bio_put(bio);
                        err = -EIO;
                        goto failed;
                }
        }

        bio->bi_end_io = nilfs_end_bio_write;
        bio->bi_private = segbuf;
        bio_get(bio);
        submit_bio(mode, bio);
        if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
                bio_put(bio);
                err = -EOPNOTSUPP;
                goto failed;
        }
        segbuf->sb_nbio++;
        bio_put(bio);

        wi->bio = NULL;
        wi->rest_blocks -= wi->end - wi->start;
        wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
        wi->start = wi->end;
        return 0;

 failed:
        wi->bio = NULL;
        return err;
}

/**
 * nilfs_alloc_seg_bio - allocate a new bio for writing a log
 * @nilfs: nilfs object
 * @start: start block number of the bio
 * @nr_vecs: requested size of the page vector
 *
 * Return Value: On success, a pointer to the struct bio is returned.
 * On error, NULL is returned.
 */
static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
                                       int nr_vecs)
{
        struct bio *bio;

        bio = bio_alloc(GFP_NOIO, nr_vecs);
        if (bio == NULL) {
                while (!bio && (nr_vecs >>= 1))
                        bio = bio_alloc(GFP_NOIO, nr_vecs);
        }
        if (likely(bio)) {
                bio->bi_bdev = nilfs->ns_bdev;
                bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
        }
        return bio;
}

static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
                                       struct nilfs_write_info *wi)
{
        wi->bio = NULL;
        wi->rest_blocks = segbuf->sb_sum.nblocks;
        wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
        wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
        wi->start = wi->end = 0;
        wi->blocknr = segbuf->sb_pseg_start;
}

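/*
 * nilfs_segbuf_submit_bh - add the page of @bh to the current BIO,
 * allocating one if necessary.  When the BIO becomes full, it is
 * submitted and the buffer is retried against a freshly allocated BIO.
 */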
static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
                                  struct nilfs_write_info *wi,
                                  struct buffer_head *bh, int mode)
{
        int len, err;

        BUG_ON(wi->nr_vecs <= 0);
 repeat:
        if (!wi->bio) {
                wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
                                              wi->nr_vecs);
                if (unlikely(!wi->bio))
                        return -ENOMEM;
        }

        len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
        if (len == bh->b_size) {
                wi->end++;
                return 0;
        }
        /* bio is FULL */
        err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
        /* never submit current bh */
        if (likely(!err))
                goto repeat;
        return err;
}

/**
 * nilfs_segbuf_write - submit write requests of a log
 * @segbuf: buffer storing a log to be written
 * @nilfs: nilfs object
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
                       struct the_nilfs *nilfs)
{
        struct nilfs_write_info wi;
        struct buffer_head *bh;
        int res = 0, rw = WRITE;

        wi.nilfs = nilfs;
        nilfs_segbuf_prepare_write(segbuf, &wi);

        list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
                res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
                if (unlikely(res))
                        goto failed_bio;
        }

        list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
                res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
                if (unlikely(res))
                        goto failed_bio;
        }

        if (wi.bio) {
                /*
                 * Last BIO is always sent through the following
                 * submission.
                 */
                rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
                res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
        }

 failed_bio:
        return res;
}

/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 */
int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
        int err = 0;

        if (!segbuf->sb_nbio)
                return 0;

        do {
                wait_for_completion(&segbuf->sb_bio_event);
        } while (--segbuf->sb_nbio > 0);

        if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
                printk(KERN_ERR "NILFS: IO error writing segment\n");
                err = -EIO;
        }
        return err;
}