drivers/block/null_blk_zoned.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include "null_blk.h"

/* zone_size in MBs to sectors. */
#define ZONE_SIZE_SHIFT         11

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
        return sect >> ilog2(dev->zone_size_sects);
}
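
/*
 * Set up the emulated zone layout: the device capacity (dev->size, in MB)
 * is carved into power-of-two sized zones of dev->zone_size MB each, the
 * first dev->zone_nr_conv of them conventional and the rest
 * sequential-write-required. zone_nr_conv is clamped so that at least one
 * sequential zone remains.
 */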
int null_zone_init(struct nullb_device *dev)
{
        sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
        sector_t sector = 0;
        unsigned int i;

        if (!is_power_of_2(dev->zone_size)) {
                pr_err("zone_size must be power-of-two\n");
                return -EINVAL;
        }

        dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
        dev->nr_zones = dev_size >>
                                (SECTOR_SHIFT + ilog2(dev->zone_size_sects));
        dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!dev->zones)
                return -ENOMEM;

        if (dev->zone_nr_conv >= dev->nr_zones) {
                dev->zone_nr_conv = dev->nr_zones - 1;
                pr_info("changed the number of conventional zones to %u",
                        dev->zone_nr_conv);
        }

        for (i = 0; i < dev->zone_nr_conv; i++) {
                struct blk_zone *zone = &dev->zones[i];

                zone->start = sector;
                zone->len = dev->zone_size_sects;
                zone->wp = zone->start + zone->len;
                zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
                zone->cond = BLK_ZONE_COND_NOT_WP;

                sector += dev->zone_size_sects;
        }

        for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
                struct blk_zone *zone = &dev->zones[i];

                zone->start = zone->wp = sector;
                zone->len = dev->zone_size_sects;
                zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
                zone->cond = BLK_ZONE_COND_EMPTY;

                sector += dev->zone_size_sects;
        }

        return 0;
}
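
/*
 * Usage note, for illustration only: this layout is typically exercised
 * through the null_blk module parameters, e.g. something like
 *
 *   modprobe null_blk gb=8 zoned=1 zone_size=256 zone_nr_conv=4
 *
 * which should yield 32 zones of 256 MB with the first 4 conventional.
 * Parameter names here are recalled from Documentation/block/null_blk.rst,
 * not defined in this file; verify them against your kernel tree.
 */
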
void null_zone_exit(struct nullb_device *dev)
{
        kvfree(dev->zones);
}
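
/*
 * Report up to nr_zones zones starting at the zone containing @sector by
 * invoking @cb once per zone, as the blk-zoned report_zones interface
 * expects.
 */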
int null_report_zones(struct gendisk *disk, sector_t sector,
                      unsigned int nr_zones, report_zones_cb cb, void *data)
{
        struct nullb *nullb = disk->private_data;
        struct nullb_device *dev = nullb->dev;
        unsigned int first_zone, i;
        struct blk_zone zone;
        int error;

        first_zone = null_zone_no(dev, sector);
        if (first_zone >= dev->nr_zones)
                return 0;

        nr_zones = min(nr_zones, dev->nr_zones - first_zone);
        for (i = 0; i < nr_zones; i++) {
                /*
                 * Stacked DM target drivers will remap the zone information by
                 * modifying the zone information passed to the report callback.
                 * So use a local copy to avoid corruption of the device zone
                 * array.
                 */
                memcpy(&zone, &dev->zones[first_zone + i],
                       sizeof(struct blk_zone));
                error = cb(&zone, i, data);
                if (error)
                        return error;
        }

        return nr_zones;
}
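
/*
 * Clamp a read so it does not extend past the write pointer of a
 * sequential zone: sectors at or beyond the write pointer hold no data.
 * Conventional zones have no write pointer, so reads pass through whole.
 */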
size_t null_zone_valid_read_len(struct nullb *nullb,
                                sector_t sector, unsigned int len)
{
        struct nullb_device *dev = nullb->dev;
        struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
        unsigned int nr_sectors = len >> SECTOR_SHIFT;

        /* Read must be below the write pointer position */
        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
            sector + nr_sectors <= zone->wp)
                return len;

        if (sector > zone->wp)
                return 0;

        return (zone->wp - sector) << SECTOR_SHIFT;
}
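
/*
 * Emulate a zoned write: sequential zones only accept writes at the
 * current write pointer; a write advances the pointer and moves the zone
 * EMPTY -> IMP_OPEN -> FULL as it fills. Conventional zones
 * (BLK_ZONE_COND_NOT_WP) accept writes anywhere.
 */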
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                                    unsigned int nr_sectors)
{
        struct nullb_device *dev = cmd->nq->dev;
        unsigned int zno = null_zone_no(dev, sector);
        struct blk_zone *zone = &dev->zones[zno];

        switch (zone->cond) {
        case BLK_ZONE_COND_FULL:
                /* Cannot write to a full zone */
                cmd->error = BLK_STS_IOERR;
                return BLK_STS_IOERR;
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_IMP_OPEN:
                /* Writes must be at the write pointer position */
                if (sector != zone->wp)
                        return BLK_STS_IOERR;

                if (zone->cond == BLK_ZONE_COND_EMPTY)
                        zone->cond = BLK_ZONE_COND_IMP_OPEN;

                zone->wp += nr_sectors;
                if (zone->wp == zone->start + zone->len)
                        zone->cond = BLK_ZONE_COND_FULL;
                break;
        case BLK_ZONE_COND_NOT_WP:
                break;
        default:
                /* Invalid zone condition */
                return BLK_STS_IOERR;
        }
        return BLK_STS_OK;
}
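
/*
 * Handle zone management operations on the zone containing @sector.
 * For REQ_OP_ZONE_RESET_ALL this relies on the block layer issuing the
 * request at sector 0 (assumed here, not checked below), so that zone[i]
 * walks dev->zones from the start of the array.
 */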
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                                   sector_t sector)
{
        struct nullb_device *dev = cmd->nq->dev;
        struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
        size_t i;

        switch (op) {
        case REQ_OP_ZONE_RESET_ALL:
                for (i = 0; i < dev->nr_zones; i++) {
                        if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
                                continue;
                        zone[i].cond = BLK_ZONE_COND_EMPTY;
                        zone[i].wp = zone[i].start;
                }
                break;
        case REQ_OP_ZONE_RESET:
                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                        return BLK_STS_IOERR;

                zone->cond = BLK_ZONE_COND_EMPTY;
                zone->wp = zone->start;
                break;
        case REQ_OP_ZONE_OPEN:
                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                        return BLK_STS_IOERR;
                if (zone->cond == BLK_ZONE_COND_FULL)
                        return BLK_STS_IOERR;

                zone->cond = BLK_ZONE_COND_EXP_OPEN;
                break;
        case REQ_OP_ZONE_CLOSE:
                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                        return BLK_STS_IOERR;
                if (zone->cond == BLK_ZONE_COND_FULL)
                        return BLK_STS_IOERR;

                zone->cond = BLK_ZONE_COND_CLOSED;
                break;
        case REQ_OP_ZONE_FINISH:
                if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                        return BLK_STS_IOERR;

                zone->cond = BLK_ZONE_COND_FULL;
                zone->wp = zone->start + zone->len;
                break;
        default:
                return BLK_STS_NOTSUPP;
        }
        return BLK_STS_OK;
}
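
/*
 * Entry point from the null_blk command path: writes and zone management
 * operations need zone bookkeeping; anything else (e.g. reads) is passed
 * through as BLK_STS_OK.
 */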
blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
                               sector_t sector, sector_t nr_sectors)
{
        switch (op) {
        case REQ_OP_WRITE:
                return null_zone_write(cmd, sector, nr_sectors);
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_RESET_ALL:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                return null_zone_mgmt(cmd, op, sector);
        default:
                return BLK_STS_OK;
        }
}