/*
 * fs/logfs/dev_bdev.c	- Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/gfp.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
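/* bi_end_io callback used by sync_request() below to wake the submitter. */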
static void request_complete(struct bio *bio, int err)
{
	complete((struct completion *)bio->bi_private);
}
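/*
 * Synchronously read or write a single page: build a one-segment bio on
 * the stack, submit it, and sleep until request_complete() fires.
 */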
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	generic_unplug_device(bdev_get_queue(bdev));
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
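/* Read one device page into the page cache, updating the page flags accordingly. */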
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}
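/*
 * Each submitted write bio bumps s_pending_writes; the end_io handlers
 * drop the count and wake this queue so bdev_sync() can wait for zero.
 */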
static DECLARE_WAIT_QUEUE_HEAD(wq);
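/* End_io for segment writes: release every page in the bio, then the bio. */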
static void writeseg_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);
	struct page *page;

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	do {
		page = bvec->bv_page;
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
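/*
 * Write nr_pages consecutive pages of the mapping inode to the device at
 * offset ofs. The block layer cannot split bios, so a full bio is
 * submitted whenever max_pages is reached and a fresh one is started.
 */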
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio;
	struct page *page;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		bio->bi_io_vec[i].bv_page = page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = writeseg_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
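/* Expand the range to whole pages, then hand it to __bdev_writeseg(). */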
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
	generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
}
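/* End_io for erase bios; the shared erase page is not released here. */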
static void erase_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
	BUG_ON(err);
	BUG_ON(bio->bi_vcnt == 0);
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}
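/*
 * Emulate an erase by writing super->s_erase_page over every page in the
 * range, chunked into bios the same way as __bdev_writeseg().
 */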
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
	int i;

	if (max_pages > BIO_MAX_PAGES)
		max_pages = BIO_MAX_PAGES;
	bio = bio_alloc(GFP_NOFS, max_pages);
	BUG_ON(!bio);

	for (i = 0; i < nr_pages; i++) {
		if (i >= max_pages) {
			/* Block layer cannot split bios :( */
			bio->bi_vcnt = i;
			bio->bi_idx = 0;
			bio->bi_size = i * PAGE_SIZE;
			bio->bi_bdev = super->s_bdev;
			bio->bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			atomic_inc(&super->s_pending_writes);
			submit_bio(WRITE, bio);

			ofs += i * PAGE_SIZE;
			index += i;
			nr_pages -= i;
			i = 0;

			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);
		}
		bio->bi_io_vec[i].bv_page = super->s_erase_page;
		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
		bio->bi_io_vec[i].bv_offset = 0;
	}
	bio->bi_vcnt = nr_pages;
	bio->bi_idx = 0;
	bio->bi_size = nr_pages * PAGE_SIZE;
	bio->bi_bdev = super->s_bdev;
	bio->bi_sector = ofs >> 9;
	bio->bi_private = sb;
	bio->bi_end_io = erase_end_io;
	atomic_inc(&super->s_pending_writes);
	submit_bio(WRITE, bio);
	return 0;
}
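/* Erases are only performed when the caller insists (ensure_write). */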
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required.  Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}

	return 0;
}
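/* Wait until all writes submitted through the counter above have completed. */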
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}
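/* On block devices the first superblock always lives at offset 0. */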
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}
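/* The last superblock is read from the last fully usable 4KiB block. */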
static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}
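/* Superblocks are written synchronously, like any other single page. */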
static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}
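/* Drop the exclusive device reference taken in logfs_get_sb_bdev(). */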
static void bdev_put_device(struct super_block *sb)
{
	close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE);
}
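/* Block devices have no device-side write buffer, so always say no. */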
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}
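/* Device operations handed to the generic logfs code for block devices. */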
static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};
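/*
 * Mount entry point for block devices. mtdblock devices are redirected
 * to the MTD backend, which can drive the underlying flash directly.
 */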
int logfs_get_sb_bdev(struct logfs_super *p,
		struct file_system_type *type, int flags,
		const char *devname, struct vfsmount *mnt)
{
	struct block_device *bdev;

	bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);
		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
		return logfs_get_sb_mtd(p, type, flags, mtdnr, mnt);
	}

	p->s_bdev = bdev;
	p->s_mtd = NULL;
	p->s_devops = &bd_devops;
	return logfs_get_sb_device(p, type, flags, mnt);
}