// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "blk.h"
/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			/*
			 * Coalesce with the previous vector when the two are
			 * physically mergeable and the combined size stays
			 * within the queue's segment limit.
			 */
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}
EXPORT_SYMBOL(blk_rq_count_integrity_sg);
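
/*
 * Illustrative sketch (not part of this file; "rq" stands for a
 * hypothetical request a driver is preparing): the count above is what a
 * caller would use to size a scatterlist allocation,
 *
 *	unsigned int nents = blk_rq_count_integrity_sg(q, rq->bio);
 *
 * see the fuller pairing with blk_rq_map_integrity_sg() below.
 */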
/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 * @sglist:	target scatterlist
 *
 * Description: Map the integrity vectors in request into a
 * scatterlist.  The scatterlist must be big enough to hold all
 * elements.  I.e. sized using blk_rq_count_integrity_sg().
 */
int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
			    struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct scatterlist *sg = NULL;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
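
/*
 * Illustrative usage sketch (hypothetical driver code; "prot_sgt" is an
 * assumed driver-owned sg_table, error handling elided): sizing the table
 * with blk_rq_count_integrity_sg() guarantees the mapping cannot overrun
 * it, as the kernel-doc above requires:
 *
 *	struct sg_table prot_sgt;
 *	unsigned int nents = blk_rq_count_integrity_sg(q, rq->bio);
 *
 *	if (sg_alloc_table(&prot_sgt, nents, GFP_ATOMIC))
 *		return -ENOMEM;
 *	blk_rq_map_integrity_sg(q, rq->bio, prot_sgt.sgl);
 */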
/*
 * Decide whether the integrity payloads of two requests allow the
 * requests themselves to be merged.
 */
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(next->bio)->bip_flags)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	if (integrity_req_gap_back_merge(req, next->bio))
		return false;

	return true;
}
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	int nr_integrity_segs;
	struct bio *next = bio->bi_next;

	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
		return false;

	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
		return false;

	/*
	 * Temporarily detach the bio from its chain so that only its own
	 * integrity vectors are counted, then restore the link.
	 */
	bio->bi_next = NULL;
	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	bio->bi_next = next;

	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	req->nr_integrity_segments += nr_integrity_segs;

	return true;
}
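
/*
 * Both helpers above answer a yes/no question for the block layer merge
 * path (blk-merge.c): returning false simply vetoes the merge and leaves
 * the original requests untouched, while blk_integrity_merge_bio()
 * additionally charges the new bio's segments to the request when it
 * says yes.
 */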
static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
	return &dev_to_disk(dev)->queue->limits.integrity;
}
const char *blk_integrity_profile_name(struct blk_integrity *bi)
{
	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-IP";
		return "T10-DIF-TYPE3-IP";
	case BLK_INTEGRITY_CSUM_CRC:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-CRC";
		return "T10-DIF-TYPE3-CRC";
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "EXT-DIF-TYPE1-CRC64";
		return "EXT-DIF-TYPE3-CRC64";
	case BLK_INTEGRITY_CSUM_NONE:
		break;
	}

	return "nop";
}
EXPORT_SYMBOL_GPL(blk_integrity_profile_name);
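
/*
 * The strings above surface in sysfs: a disk formatted with Type 1
 * protection and a CRC guard, for example, reports "T10-DIF-TYPE1-CRC"
 * through the "format" attribute defined below.
 */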
static ssize_t flag_store(struct device *dev, const char *page, size_t count,
			  unsigned char flag)
{
	struct request_queue *q = dev_to_disk(dev)->queue;
	struct queue_limits lim;
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (err)
		return err;

	/* note that the flags are inverted vs the values in the sysfs files */
	lim = queue_limits_start_update(q);
	if (val)
		lim.integrity.flags &= ~flag;
	else
		lim.integrity.flags |= flag;

	blk_mq_freeze_queue(q);
	err = queue_limits_commit_update(q, &lim);
	blk_mq_unfreeze_queue(q);
	if (err)
		return err;
	return count;
}
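
/*
 * Usage sketch (device name hypothetical): because the flags are
 * inverted, writing "1" enables the behaviour and "0" disables it, e.g.
 *
 *	# echo 0 > /sys/block/sda/integrity/read_verify
 *
 * turns off integrity verification on reads for that disk.
 */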
static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%d\n", !(bi->flags & flag));
}
static ssize_t format_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	if (!bi->tuple_size)
		return sysfs_emit(page, "none\n");
	return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
}

static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n", bi->tag_size);
}

static ssize_t protection_interval_bytes_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  bi->interval_exp ? 1 << bi->interval_exp : 0);
}
static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t device_is_integrity_capable_show(struct device *dev,
						struct device_attribute *attr,
						char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  !!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
}
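
/*
 * The attributes below appear under /sys/block/<disk>/integrity/, per
 * the group name set at the bottom of this file.
 */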
static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);

static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL
};

const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};