/*
 * block/blk-lib.c - generic block layer helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
/*
 * Completion handler for discard bios: record any error in the bio flags,
 * wake up a waiter if one is attached, and release the payload page.
 */
static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	if (bio->bi_private)
		complete(bio->bi_private);
	__free_page(bio_page(bio));

	bio_put(bio);
}
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = flags & BLKDEV_IFL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	struct bio *bio;
	struct page *page;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;
	while (nr_sects && !ret) {
		unsigned int sector_size = q->limits.logical_block_size;
		unsigned int max_discard_sectors =
			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
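		/*
		 * bi_size is an unsigned int counted in bytes, so clamping
		 * to UINT_MAX >> 9 sectors keeps the << 9 conversion below
		 * from overflowing.
		 */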
		bio = bio_alloc(gfp_mask, 1);
		if (!bio)
			goto out;
		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &wait;
		/*
		 * Add a zeroed one-sector payload as that's what
		 * our current implementations need.  If we'll ever need
		 * more the interface will need revisiting.
		 */
		page = alloc_page(gfp_mask | __GFP_ZERO);
		if (!page)
			goto out_free_bio;
		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
			goto out_free_page;
		/*
		 * And override the bio size - the way discard works we
		 * touch many more blocks on disk than the actual payload
		 * length.
		 */
		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}
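		/*
		 * The payload added above stays a single sector no matter
		 * how large the discarded range is; only bi_size, in bytes,
		 * describes the range actually discarded.
		 */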
		bio_get(bio);
		submit_bio(type, bio);

		if (flags & BLKDEV_IFL_WAIT)
			wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}
	return ret;
out_free_page:
	__free_page(page);
out_free_bio:
	bio_put(bio);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(blkdev_issue_discard);
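/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that discards a range and falls back to writing zeroes when the device
 * lacks discard support. The wrapper name and range are made up.
 *
 *	static int example_discard_or_zero(struct block_device *bdev,
 *					   sector_t start, sector_t len)
 *	{
 *		int ret = blkdev_issue_discard(bdev, start, len, GFP_KERNEL,
 *					       BLKDEV_IFL_WAIT);
 *		if (ret == -EOPNOTSUPP)
 *			ret = blkdev_issue_zeroout(bdev, start, len,
 *						   GFP_KERNEL,
 *						   BLKDEV_IFL_WAIT);
 *		return ret;
 *	}
 */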
/*
 * A batch of bios sharing one completion: @done counts completed bios,
 * @flags accumulates BIO_* status bits across the batch, @wait is
 * signalled once per completed bio, @end_io is an optional callback.
 */
struct bio_batch
{
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
	bio_end_io_t		*end_io;
};
static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	/* bb is NULL when the submitter did not ask to wait */
	if (bb) {
		if (err) {
			if (err == -EOPNOTSUPP)
				set_bit(BIO_EOPNOTSUPP, &bb->flags);
			else
				clear_bit(BIO_UPTODATE, &bb->flags);
		}
		if (bb->end_io)
			bb->end_io(bio, err);
		atomic_inc(&bb->done);
		complete(bb->wait);
	}
	bio_put(bio);
}
/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Generate and issue a number of bios with zero-filled pages.
 *    Send a barrier at the beginning and at the end if requested; this
 *    guarantees correct request ordering. An empty barrier allows us to
 *    avoid a post-queue flush.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	int ret = 0;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz, issued = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 0);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;
	bb.end_io = NULL;
	if (flags & BLKDEV_IFL_BARRIER) {
		/* issue an async barrier before the data */
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
		if (ret)
			return ret;
	}
submit:
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio)
			break;

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		if (flags & BLKDEV_IFL_WAIT)
			bio->bi_private = &bb;
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			if (sz == 0)
				/* bio has maximum size possible */
				break;
			/* bio_add_page() returns the number of bytes added */
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		issued++;
		submit_bio(WRITE, bio);
	}
	/*
	 * When all data bios are in flight, send the final barrier if
	 * requested.
	 */
	if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
		ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
					flags & BLKDEV_IFL_WAIT);
	if (flags & BLKDEV_IFL_WAIT)
		/* Wait for bios in flight */
		while (issued != atomic_read(&bb.done))
			wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;
	if (ret)
		goto out;

	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (nr_sects != 0)
		goto submit;
out:
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
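/*
 * Example (illustrative sketch, not part of the original file): zero the
 * first 1 MiB of a device, i.e. 2048 sectors of 512 bytes, waiting for
 * completion and bracketing the writes with barriers. The function name
 * is made up.
 *
 *	static int example_wipe_head(struct block_device *bdev)
 *	{
 *		return blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL,
 *					    BLKDEV_IFL_WAIT |
 *					    BLKDEV_IFL_BARRIER);
 *	}
 *
 * Without BLKDEV_IFL_WAIT, bi_private stays NULL, so completions are not
 * accounted to the bio_batch and errors cannot be reported back.
 */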