/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
struct bio_batch {
        atomic_t                done;
        unsigned long           flags;
        struct completion       *wait;
};
static void bio_batch_end_io(struct bio *bio, int err)
{
        struct bio_batch *bb = bio->bi_private;

        if (err && (err != -EOPNOTSUPP))
                clear_bit(BIO_UPTODATE, &bb->flags);
        if (atomic_dec_and_test(&bb->done))
                complete(bb->wait);
        bio_put(bio);
}
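/*
 * A summary of the bio_batch pattern above (descriptive only, no new
 * behaviour): submitters initialise bb.done to 1 and then take one
 * extra reference per submitted bio with atomic_inc(&bb.done).  Each
 * completion drops a reference, and the submitter drops the initial
 * one after the last submit_bio(); whoever brings the count to zero
 * fires bb->wait, so the submitter's wait_for_completion() returns
 * only once every bio in the batch has ended.  A failed bio clears
 * BIO_UPTODATE in bb.flags, except for -EOPNOTSUPP, which is treated
 * as non-fatal.
 */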
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
        sector_t max_discard_sectors;
        sector_t granularity, alignment;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
        struct blk_plug plug;

        if (!q)
                return -ENXIO;

        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = bdev_discard_alignment(bdev) >> 9;
        alignment = sector_div(alignment, granularity);
        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        sector_div(max_discard_sectors, granularity);
        max_discard_sectors *= granularity;
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
        }
        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secdiscard(q))
                        return -EOPNOTSUPP;
                type |= REQ_SECURE;
        }

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        blk_start_plug(&plug);
        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;

                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
                /*
                 * If splitting a request, and the next starting sector would be
                 * misaligned, stop the discard at the previous aligned sector.
                 */
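                /*
                 * Worked example with illustrative numbers: granularity = 8,
                 * alignment = 2, sector = 10, req_sects = 100, nr_sects = 200.
                 * end_sect would be 110, and 110 % 8 = 6 != 2, so round down:
                 * end_sect = ((110 - 2) / 8) * 8 + 2 = 106, req_sects = 96.
                 * The next chunk then starts at 106, and 106 % 8 = 2 matches
                 * the device's discard alignment.
                 */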
                end_sect = sector + req_sects;
                tmp = end_sect;
                if (req_sects < nr_sects &&
                    sector_div(tmp, granularity) != alignment) {
                        end_sect = end_sect - alignment;
                        sector_div(end_sect, granularity);
                        end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
                bio->bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;

                bio->bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                atomic_inc(&bb.done);
                submit_bio(type, bio);
        }
        blk_finish_plug(&plug);

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -EIO;

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
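/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * discard 1 MiB starting 1 MiB into an already-opened block device.
 * GFP_KERNEL assumes a sleepable context.
 *
 *	sector_t start = 2048;	(1 MiB offset in 512 B sectors)
 *	sector_t len = 2048;	(1 MiB length)
 *	int err = blkdev_issue_discard(bdev, start, len, GFP_KERNEL, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 *
 * Passing BLKDEV_DISCARD_SECURE instead of 0 requests a secure discard
 * and fails with -EOPNOTSUPP if the queue does not support it.
 */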
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                            sector_t nr_sects, gfp_t gfp_mask,
                            struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;

        if (!q)
                return -ENXIO;

        max_write_same_sectors = q->limits.max_write_same_sectors;

        if (max_write_same_sectors == 0)
                return -EOPNOTSUPP;
        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        while (nr_sects) {
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }
                bio->bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                if (nr_sects > max_write_same_sectors) {
                        bio->bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_size = nr_sects << 9;
                        nr_sects = 0;
                }

                atomic_inc(&bb.done);
                submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
        }
        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -ENOTSUPP;

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
);
213 * blkdev_issue_zeroout - generate number of zero filed write bios
214 * @bdev: blockdev to issue
215 * @sector: start sector
216 * @nr_sects: number of sectors to write
217 * @gfp_mask: memory allocation flags (for bio_alloc)
220 * Generate and issue number of bios with zerofiled pages.
223 int __blkdev_issue_zeroout(struct block_device
*bdev
, sector_t sector
,
224 sector_t nr_sects
, gfp_t gfp_mask
)
        int ret;
        struct bio *bio;
        struct bio_batch bb;
        unsigned int sz;
        DECLARE_COMPLETION_ONSTACK(wait);

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        ret = 0;
        while (nr_sects != 0) {
                bio = bio_alloc(gfp_mask,
                                min(nr_sects, (sector_t)BIO_MAX_PAGES));
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }
                bio->bi_sector = sector;
                bio->bi_bdev = bdev;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_private = &bb;
                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
                        ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
                        nr_sects -= ret >> 9;
                        sector += ret >> 9;
                        if (ret < (sz << 9))
                                break;
                }
                ret = 0;
                atomic_inc(&bb.done);
                submit_bio(WRITE, bio);
        }
        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                /* One of the bios in the batch completed with an error. */
                ret = -EIO;

        return ret;
}
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Zero-fill the block range, using WRITE SAME when the device
 *    supports it and falling back to manually zero-filled bios when
 *    it does not.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects, gfp_t gfp_mask)
{
        if (bdev_write_same(bdev)) {
                unsigned char bdn[BDEVNAME_SIZE];

                if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
                                             ZERO_PAGE(0)))
                        return 0;

                bdevname(bdev, bdn);
                pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
        }

        return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
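/*
 * A minimal usage sketch (hypothetical caller): zero the first 1 MiB
 * of the device, letting the helper pick WRITE SAME with the zero
 * page when available and plain zero-filled writes otherwise.
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL);
 *	if (err)
 *		pr_warn("zeroout failed: %d\n", err);
 */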