block/blk-settings.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
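
/*
 * Illustrative usage sketch (not part of the upstream file; the member array,
 * num_members and "mydrv" are hypothetical placeholders): a stacking driver
 * typically resets its limits with blk_set_stacking_limits() and then folds in
 * every component device, e.g. via queue_limits_stack_bdev() defined later in
 * this file.
 *
 *	struct queue_limits lim;
 *	int i;
 *
 *	blk_set_stacking_limits(&lim);
 *	for (i = 0; i < num_members; i++)
 *		queue_limits_stack_bdev(&lim, member[i].bdev,
 *					member[i].data_offset, "mydrv");
 */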

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
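
/*
 * Worked example with assumed values (illustrative only): for a device
 * reporting io_opt = 1 MiB on a 4 KiB page system, ra_pages becomes
 * max(2 MiB / 4 KiB, VM_READAHEAD_PAGES) = max(512, 32) = 512 pages, i.e. a
 * 2 MiB read-ahead window; with max_sectors = 2560, io_pages is
 * 2560 >> PAGE_SECTORS_SHIFT = 320 pages.
 */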

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->tuple_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not support without checksum.\n");
		return -EINVAL;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	return 0;
}

/*
 * Returns the maximum number of bytes which are guaranteed to fit in a bio.
 *
 * We request that an atomic_write is an ITER_UBUF iov_iter (so a single
 * vector), so we assume that we can fit in at least PAGE_SIZE in a segment,
 * apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
	unsigned int length;

	length = min(max_segments, 2) * lim->logical_block_size;
	if (max_segments > 2)
		length += (max_segments - 2) * PAGE_SIZE;

	return length;
}
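
/*
 * Worked example with assumed values (illustrative only): with
 * lim->max_segments = 128, BIO_MAX_VECS = 256, a 512-byte logical block size
 * and 4 KiB pages, max_segments = min(256, 128) = 128 and the guaranteed
 * length is 2 * 512 + 126 * 4096 = 517120 bytes.
 */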

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
				      blk_queue_max_guaranteed_bio(lim));

	unit_limit = rounddown_pow_of_two(unit_limit);

	lim->atomic_write_max_sectors =
		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
		    lim->max_hw_sectors);
	lim->atomic_write_unit_min =
		min(lim->atomic_write_hw_unit_min, unit_limit);
	lim->atomic_write_unit_max =
		min(lim->atomic_write_hw_unit_max, unit_limit);
	lim->atomic_write_boundary_sectors =
		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}
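
/*
 * Continuing the worked example above (illustrative, values assumed): if
 * lim->max_hw_sectors << SECTOR_SHIFT is larger than the 517120-byte
 * guaranteed bio size, unit_limit = rounddown_pow_of_two(517120) = 262144,
 * so atomic_write_unit_min and atomic_write_unit_max are capped at 256 KiB
 * regardless of larger values advertised by the hardware.
 */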

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
	unsigned int boundary_sectors;

	if (!lim->atomic_write_hw_max)
		goto unsupported;

	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

	if (boundary_sectors) {
		/*
		 * A feature of boundary support is that it disallows merging
		 * of bios when the resulting request would cross either a
		 * chunk sector or atomic write HW boundary, even though chunk
		 * sectors may be set purely for performance.
		 * For simplicity, disallow atomic writes for a chunk sector
		 * which is non-zero and smaller than the atomic write HW
		 * boundary.  Furthermore, chunk sectors must be a multiple of
		 * the atomic write HW boundary; otherwise boundary support
		 * becomes complicated.
		 * Devices which do not conform to these rules can be dealt
		 * with if and when they show up.
		 */
		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so this following check
		 * could be relaxed in future.
		 * Furthermore, if needed, unit_max could even be reduced so
		 * that it is compliant with a !power-of-2 boundary.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}

	blk_atomic_writes_update_limits(lim);
	return;

unsupported:
	lim->atomic_write_max_sectors = 0;
	lim->atomic_write_boundary_sectors = 0;
	lim->atomic_write_unit_min = 0;
	lim->atomic_write_unit_max = 0;
}

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	else if (blk_validate_block_size(lim->logical_block_size)) {
		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
		return -EINVAL;
	}
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account.  The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
			lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Random default for the maximum number of segments.  Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * A stacking device may have both a virtual boundary and a max
	 * segment size limit, so allow this setting for now.  Long-term the
	 * two might need to move out of the stacking limits, since we have
	 * immutable bvecs and lower layer bio splitting is supposed to handle
	 * the two correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default
		 * that drivers probably should override.  Just like the I/O
		 * size we require drivers to at least handle a full page per
		 * segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
			return -EINVAL;
	}

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits.  Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->flags &= ~BLK_FLAG_MISALIGNED;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	blk_validate_atomic_write_limits(lim);

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}
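
/*
 * Worked example with assumed values (illustrative only): a driver that only
 * sets logical_block_size = 4096 and max_hw_sectors = 2048 comes out of
 * validation with physical_block_size = 4096, io_min = 4096,
 * max_sectors = min(2048, BLK_DEF_MAX_SECTORS_CAP),
 * max_segments = BLK_MAX_SEGMENTS, max_segment_size = BLK_MAX_SEGMENT_SIZE
 * and dma_alignment = 511.
 */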

/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which could be no limit in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
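
/*
 * Illustrative usage sketch (not part of the upstream file; new_max_hw_sectors
 * is a hypothetical placeholder): the intended pattern is to snapshot the
 * limits with queue_limits_start_update() (which takes q->limits_lock),
 * modify the copy, and commit it.
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int ret;
 *
 *	lim.max_hw_sectors = new_max_hw_sectors;
 *	ret = queue_limits_commit_update(q, &lim);
 *
 * The lock is dropped here on both the success and the error path.
 */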

/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal I/O size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);
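
/*
 * Illustrative usage sketch (not part of the upstream file; values assumed):
 * a striping driver with a 64 KiB chunk and four data disks might export the
 * chunk size as the minimum and the full stripe width as the optimal I/O size:
 *
 *	blk_limits_io_min(&lim, 64 * 1024);
 *	blk_limits_io_opt(&lim, 4 * 64 * 1024);
 *
 * Consumers of these limits (e.g. mkfs tools reading them through sysfs) can
 * then size and align requests to avoid read-modify-write cycles.
 */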

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
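
/*
 * Worked example with assumed values (illustrative only): for limits with
 * physical_block_size = 4096, io_min = 4096 and alignment_offset = 0, a
 * partition starting at sector 63 gives granularity = 4096,
 * alignment = (63 % 8) << SECTOR_SHIFT = 3584, so the function returns
 * (4096 + 0 - 3584) % 4096 = 512 bytes.
 */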

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:	the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * BLK_FEAT_NOWAIT and BLK_FEAT_POLL need to be supported both by the
	 * stacking driver and all underlying devices.  The stacking driver
	 * sets the flags before stacking the limits, and this will clear the
	 * flags if any of the underlying devices does not support it.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
					 queue_limits_max_zone_append_sectors(b));

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
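
/*
 * Worked example with assumed values (illustrative only): stacking a bottom
 * device with physical_block_size = 4096, io_min = 4096 and io_opt = 24576
 * into top limits that currently have physical_block_size = 512, io_min = 512
 * and io_opt = 65536 yields io_min = max(512, 4096) = 4096 and
 * io_opt = lcm_not_zero(65536, 24576) = 196608.  Both are multiples of the
 * new 4096-byte physical block size, so (with both devices starting at an
 * aligned offset) the misaligned flag stays clear and 0 is returned.
 */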

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the target @t.
 * Stacking is possible if either:
 *
 * a) @t does not have any integrity information stacked into it yet
 * b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear
 * the integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (!ti->tuple_size) {
		/* inherit the settings from the first underlying device */
		if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
			ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
				(bi->flags & BLK_INTEGRITY_REF_TAG);
			ti->csum_type = bi->csum_type;
			ti->tuple_size = bi->tuple_size;
			ti->pi_offset = bi->pi_offset;
			ti->interval_exp = bi->interval_exp;
			ti->tag_size = bi->tag_size;
			goto done;
		}
		if (!bi->tuple_size)
			goto done;
	}

	if (ti->tuple_size != bi->tuple_size)
		goto incompatible;
	if (ti->interval_exp != bi->interval_exp)
		goto incompatible;
	if (ti->tag_size != bi->tag_size)
		goto incompatible;
	if (ti->csum_type != bi->csum_type)
		goto incompatible;
	if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
	    (bi->flags & BLK_INTEGRITY_REF_TAG))
		goto incompatible;

done:
	ti->flags |= BLK_INTEGRITY_STACKED;
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.flags & BLK_FLAG_MISALIGNED)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);