// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	/*
	 * Chain the previous bio to the new one and submit it, so the
	 * caller only has to wait on the last bio of the sequence.
	 */
	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
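
/*
 * Chaining sketch (illustrative, not part of this file): each next_bio()
 * call links the previous bio to the new one and submits it, so a caller
 * builds a chain and waits only on the final bio:
 *
 *	struct bio *bio = NULL;
 *	bio = next_bio(bio, 0, GFP_KERNEL);	(first bio, nothing chained)
 *	bio = next_bio(bio, 0, GFP_KERNEL);	(chains and submits the first)
 *	... fill in the last bio, then submit_bio_wait(bio) ...
 */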

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* The range must be aligned to the logical block size. */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/*
		 * Issue in chunks of the user defined max discard setting,
		 * ensuring that bi_size doesn't overflow.
		 */
		req_sects = min_t(sector_t, nr_sects,
				q->limits.max_discard_sectors);
		if (!req_sects)
			goto fail;
		if (req_sects > UINT_MAX >> 9)
			req_sects = UINT_MAX >> 9;

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;

fail:
	if (bio) {
		submit_bio_wait(bio);
		bio_put(bio);
	}
	*biop = NULL;
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
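
/*
 * Worked example of the alignment clamp above (illustrative numbers, not
 * taken from any particular device): with granularity = 64 sectors,
 * alignment = 0, sector = 0, nr_sects = 200 and max_discard_sectors = 100,
 * the first chunk would end at end_sect = 100, which is not a multiple of
 * the granularity.  The clamp rounds it down to 64, so req_sects becomes
 * 64 and the next chunk starts at an aligned sector.  With
 * max_discard_sectors = 128 the chunk boundary (128) is already aligned
 * and req_sects is left untouched.
 */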

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
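
/*
 * Usage sketch (hypothetical caller, not part of this file): discard the
 * first 1 GiB of a device and wait for completion, letting the helper
 * split the range according to the queue's discard limits:
 *
 *	int err = blkdev_issue_discard(bdev, 0, 1 << (30 - 9),
 *				       GFP_KERNEL, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 */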

/**
 * __blkdev_issue_write_same - generate number of bios with same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the
 *    same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
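
/*
 * Usage sketch (hypothetical caller, not part of this file): replicate the
 * first logical block held in @page across 2048 sectors.  The page must
 * stay allocated until the call returns:
 *
 *	int err = blkdev_issue_write_same(bdev, 0, 2048, GFP_KERNEL, page);
 */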

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
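
/*
 * Example of the conversion above, assuming a 4 KiB PAGE_SIZE (eight 512B
 * sectors per page): nr_sects = 7 rounds up to 1 page and nr_sects = 17 to
 * 3 pages, while anything above BIO_MAX_PAGES worth of sectors (256 pages,
 * i.e. nr_sects > 2048, with 4 KiB pages) is capped at BIO_MAX_PAGES.
 */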

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			/* Stop filling this bio once it rejects a page. */
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
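
/*
 * Usage sketch (hypothetical caller, not part of this file) for the anchor
 * bio pattern: pass a NULL-initialized bio pointer, then submit and wait
 * on the final chained bio yourself (this is what blkdev_issue_zeroout()
 * below does under a plug):
 *
 *	struct bio *bio = NULL;
 *	int err = __blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, &bio, 0);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */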

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
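
/*
 * Usage sketch (hypothetical caller, not part of this file): zero 1 MiB
 * while keeping the blocks provisioned, and fail rather than fall back to
 * writing zero pages if the device lacks a zeroing offload:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 1 << (20 - 9), GFP_KERNEL,
 *			BLKDEV_ZERO_NOUNMAP | BLKDEV_ZERO_NOFALLBACK);
 */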