// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
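
/*
 * Note: next_bio() implements a simple submission chain. Each previously
 * built bio is chained to the newly allocated one with bio_chain() and
 * submitted immediately, so the new bio does not complete until its
 * predecessor has. A single submit_bio_wait() on the final returned bio
 * (see the callers below) therefore waits for every bio in the chain.
 */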

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
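
/*
 * Worked example of the split alignment above (illustrative values, not
 * taken from any particular device): with a discard granularity of 8
 * sectors, an alignment offset of 2, sector == 0, and a cap that would end
 * the bio at end_sect == 21: tmp = 21, sector_div(tmp, 8) == 5 != 2, so
 * the code rounds down: end_sect = (21 - 2) / 8 * 8 + 2 == 18 and
 * req_sects becomes 18. The next bio then starts at sector 18, which sits
 * on a granularity boundary (18 == 2 * 8 + 2).
 */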

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
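
/*
 * Usage sketch (hypothetical caller, not part of this file): discard a
 * 1MiB range at the start of a device for which the caller already holds
 * a reference. Passing BLKDEV_DISCARD_SECURE instead of 0 requests a
 * secure erase and fails with -EOPNOTSUPP if the queue does not support
 * it.
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 */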

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same
 *    page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
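
/*
 * Usage sketch (hypothetical caller): replicate one page of data across 16
 * sectors starting at sector 128, assuming both values are aligned to the
 * device's logical block size. Every bio in the chain references @page
 * directly, and submit_bio_wait() above is synchronous, so the page must
 * stay valid until the call returns.
 *
 *	int err = blkdev_issue_write_same(bdev, 128, 16, GFP_KERNEL, page);
 */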

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
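
/*
 * Example (assuming PAGE_SIZE == 4096, i.e. 8 sectors per page):
 * nr_sects == 24 maps to DIV_ROUND_UP(24, 8) == 3 pages, nr_sects == 1
 * still maps to 1 page, and anything above BIO_MAX_PAGES * 8 sectors is
 * clamped to BIO_MAX_PAGES.
 */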

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
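
/*
 * Usage sketch (hypothetical caller): the *biop anchor lets a caller
 * accumulate bios for several logical-block-aligned ranges and wait once,
 * mirroring how blkdev_issue_zeroout() below drives this helper. Error
 * handling is elided and the ranges are illustrative.
 *
 *	struct bio *bio = NULL;
 *
 *	__blkdev_issue_zeroout(bdev, 0, 256, GFP_KERNEL, &bio, 0);
 *	__blkdev_issue_zeroout(bdev, 1024, 256, GFP_KERNEL, &bio, 0);
 *	if (bio) {
 *		submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */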

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
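
/*
 * Usage sketch (hypothetical caller): zero 1MiB at sector 0 while insisting
 * on a hardware offload. With BLKDEV_ZERO_NOFALLBACK the call returns
 * -EOPNOTSUPP instead of falling back to writing ZERO_PAGE(0), which a
 * caller that has a cheaper way to provide zeroes may prefer to handle
 * itself.
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOFALLBACK);
 */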