logfs get_sb, part 2
/*
 * fs/logfs/dev_bdev.c	- Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/gfp.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
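
/*
 * Synchronous single-page I/O.  The bio, its lone bio_vec and the
 * completion all live on the caller's stack; request_complete() merely
 * fires the completion so sync_request() can sleep until the I/O is done.
 */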
static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	generic_unplug_device(bdev_get_queue(bdev));
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
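
/*
 * Read one page synchronously and set the page flags the way the page
 * cache expects before unlocking.  Also used as the filler_t callback
 * for read_cache_page() in the superblock lookup paths below.
 */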
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

static DECLARE_WAIT_QUEUE_HEAD(wq);
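
/*
 * Completion handler for segment writes.  Walks the bio_vec array back
 * to front, ending writeback and dropping the page cache reference taken
 * in __bdev_writeseg(), then wakes any bdev_sync() waiter once the last
 * pending write has finished.
 */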
static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
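
/*
 * Write nr_pages consecutive pages from the mapping inode to the device,
 * packing them into as few bios as the queue's hardware limits allow.
 * Whenever a bio fills up it is submitted and a fresh one is allocated,
 * since the block layer cannot split bios on our behalf.  Every submitted
 * bio bumps s_pending_writes so bdev_sync() can wait for completion.
 */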
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
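
/*
 * Round the range out to full pages, write it back and unplug the queue
 * so the I/O starts immediately rather than waiting for more requests.
 */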
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when an object fits perfectly into a
		 * segment, so the segment was already written out by a
		 * sync and subsequently closed. */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
	generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
}
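
/*
 * Completion handler for erases.  Unlike writeseg_end_io() there are no
 * per-page cleanups to do, as every bio_vec points at the shared
 * s_erase_page.
 */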
static void erase_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
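
/*
 * Emulate an erase by writing the shared s_erase_page over the whole
 * range, batching pages into bios exactly like __bdev_writeseg() does.
 */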
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
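
/*
 * Erase a page-aligned range.  Block devices have no real erase
 * operation, so the emulation is only performed when the caller
 * insists on it via ensure_write.
 */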
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}

	return 0;
}
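
/* Wait until every bio submitted by writeseg and erase has completed. */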
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}
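
/*
 * The superblock anchors sit in the first and in the last fully
 * contained 4KiB block of the device; both are read through the page
 * cache of the mapping inode.
 */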
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}

static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}
static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct super_block *sb)
{
	close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE);
}
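
/*
 * A block device can rewrite sectors in place, so the write buffer is
 * presumably always writable; report success unconditionally.
 */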
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}
static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};
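
/*
 * Open the device exclusively and hand off to the generic logfs mount
 * path.  An mtdblock device is reopened through logfs_get_sb_mtd() so
 * the native MTD backend is used instead of the block emulation.
 */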
int logfs_get_sb_bdev(struct logfs_super *p,
		struct file_system_type *type, int flags,
		const char *devname, struct vfsmount *mnt)
{
	struct block_device *bdev;

	bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type);
	if (IS_ERR(bdev)) {
		kfree(p);
		return PTR_ERR(bdev);
	}

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
		return logfs_get_sb_mtd(p, type, flags, mtdnr, mnt);
	}

	p->s_bdev = bdev;
	p->s_mtd = NULL;
	p->s_devops = &bd_devops;

	return logfs_get_sb_device(p, type, flags, mnt);
}