/*
 * fs/logfs/dev_bdev.c - Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}
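/*
 * Synchronous single-page I/O: build a one-vector bio on the stack,
 * submit it and sleep on a completion until request_complete() runs.
 * Used by bdev_readpage() and bdev_write_sb() below.
 */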
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_max_vecs = 1;
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}
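/*
 * Asynchronous writes (writeseg and erase) are counted in
 * super->s_pending_writes; the completion handlers below decrement the
 * counter and wake this queue so that bdev_sync() can wait for all
 * outstanding bios to finish.
 */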
static DECLARE_WAIT_QUEUE_HEAD(wq);
static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
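/*
 * Write nr_pages pages of the mapping inode, starting at index, to the
 * device at offset ofs.  Pages are added to a bio until it is full
 * (at most bio_get_nr_vecs() vectors); since the block layer cannot
 * split bios for us, a full bio is submitted and a fresh one started
 * for the remaining pages.
 */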
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	unsigned int max_pages;
	int i;

	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
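/*
 * Round the write out to whole pages before handing it to
 * __bdev_writeseg(): ofs is pushed down to a page boundary, len grows
 * by the same amount and is then page-aligned upwards.  With 4KiB
 * pages, e.g., ofs 0x1234/len 0x100 becomes ofs 0x1000/len 0x1000.
 */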
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}
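/*
 * Erase completion mirrors writeseg_end_io() except that there are no
 * pagecache pages to release: do_erase() only ever writes the shared
 * super->s_erase_page.
 */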
static void erase_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	unsigned int max_pages;
	int i;

	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));

	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
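/*
 * Block devices have no physical erase.  When ensure_write is set the
 * range is overwritten with copies of super->s_erase_page instead, so
 * that a later journal scan cannot pick up a stale commit entry;
 * otherwise the erase is simply a no-op.
 */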
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}

	return 0;
}
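/* Wait for every bio submitted by __bdev_writeseg() and do_erase() to complete. */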
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}
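/*
 * The first superblock lives at offset 0; the last one occupies the
 * final full 4KiB block of the device (any partial tail block is
 * ignored).  Both are read through the mapping inode via
 * bdev_readpage().
 */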
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}
static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}
static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}
static void bdev_put_device(struct logfs_super *s)
{
	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
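/*
 * Block devices can be rewritten in place, so (unlike the MTD backend)
 * there is no erased-area check to make; unconditionally report
 * success.
 */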
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}
static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};
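/*
 * Open the named device exclusively (with the filesystem type as
 * holder).  mtdblock devices are handed back to the MTD backend so
 * logfs can drive the raw MTD device rather than the block emulation.
 */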
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
		const char *devname)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		return logfs_get_sb_mtd(p, mtdnr);
	}

	p->s_bdev = bdev;
	p->s_mtd = NULL;
	p->s_devops = &bd_devops;
	return 0;
}