/*
 * QEMU NVM Express Virtual Namespace
 *
 * Copyright (c) 2019 CNEX Labs
 * Copyright (c) 2020 Samsung Electronics
 *
 * Authors:
 *  Klaus Jensen      <k.jensen@samsung.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */
15 #include "qemu/osdep.h"
16 #include "qemu/units.h"
17 #include "qemu/cutils.h"
18 #include "qemu/error-report.h"
19 #include "qapi/error.h"
20 #include "qemu/bitops.h"
21 #include "sysemu/sysemu.h"
22 #include "sysemu/block-backend.h"
27 #define MIN_DISCARD_GRANULARITY (4 * KiB)
28 #define NVME_DEFAULT_ZONE_SIZE (128 * MiB)
void nvme_ns_init_format(NvmeNamespace *ns)
{
    NvmeIdNs *id_ns = &ns->id_ns;
    BlockDriverInfo bdi;
    int npdg, ret;
    int64_t nlbas;

    ns->lbaf = id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(id_ns->flbas)];
    ns->lbasz = 1 << ns->lbaf.ds;

    /* each LBA consumes one logical block plus its per-block metadata */
    nlbas = ns->size / (ns->lbasz + ns->lbaf.ms);

    id_ns->nsze = cpu_to_le64(nlbas);

    /* no thin provisioning */
    id_ns->ncap = id_ns->nsze;
    id_ns->nuse = id_ns->ncap;

    /* metadata is stored contiguously after the data blocks */
    ns->moff = nlbas << ns->lbaf.ds;

    npdg = ns->blkconf.discard_granularity / ns->lbasz;

    ret = bdrv_get_info(blk_bs(ns->blkconf.blk), &bdi);
    if (ret >= 0 && bdi.cluster_size > ns->blkconf.discard_granularity) {
        npdg = bdi.cluster_size / ns->lbasz;
    }

    id_ns->npda = id_ns->npdg = npdg - 1;
}
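/*
 * Worked example (illustrative, not part of the original source): with the
 * default 4 KiB discard granularity and 512 byte logical blocks, npdg is
 * 4096 / 512 = 8, so the 0's based NPDG/NPDA fields above are reported as 7.
 */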
static int nvme_ns_init(NvmeNamespace *ns, Error **errp)
{
    static uint64_t ns_count;
    NvmeIdNs *id_ns = &ns->id_ns;
    NvmeIdNsNvm *id_ns_nvm = &ns->id_ns_nvm;
    uint8_t ds;
    uint16_t ms;
    int i;

    ns->csi = NVME_CSI_NVM;
    ns->status = 0x0;

    ns->id_ns.dlfeat = 0x1;

    /* support DULBE and I/O optimization fields */
    id_ns->nsfeat |= (0x4 | 0x10);

    if (ns->params.shared) {
        id_ns->nmic |= NVME_NMIC_NS_SHARED;
    }

    /* Substitute a missing EUI-64 by an autogenerated one */
    ++ns_count;
    if (!ns->params.eui64 && ns->params.eui64_default) {
        ns->params.eui64 = ns_count + NVME_EUI64_DEFAULT;
    }

    id_ns->mssrl = cpu_to_le16(ns->params.mssrl);
    id_ns->mcl = cpu_to_le32(ns->params.mcl);
    id_ns->msrc = ns->params.msrc;
    id_ns->eui64 = cpu_to_be64(ns->params.eui64);

    ds = 31 - clz32(ns->blkconf.logical_block_size);
    ms = ns->params.ms;

    id_ns->mc = NVME_ID_NS_MC_EXTENDED | NVME_ID_NS_MC_SEPARATE;

    if (ms && ns->params.mset) {
        id_ns->flbas |= NVME_ID_NS_FLBAS_EXTENDED;
    }

    id_ns->dpc = 0x1f;
    id_ns->dps = ns->params.pi;
    if (ns->params.pi && ns->params.pil) {
        id_ns->dps |= NVME_ID_NS_DPS_FIRST_EIGHT;
    }

    ns->pif = ns->params.pif;

    static const NvmeLBAF defaults[16] = {
        [0] = { .ds =  9           },
        [1] = { .ds =  9, .ms =  8 },
        [2] = { .ds =  9, .ms = 16 },
        [3] = { .ds =  9, .ms = 64 },
        [4] = { .ds = 12           },
        [5] = { .ds = 12, .ms =  8 },
        [6] = { .ds = 12, .ms = 16 },
        [7] = { .ds = 12, .ms = 64 },
    };

    ns->nlbaf = 8;

    memcpy(&id_ns->lbaf, &defaults, sizeof(defaults));

    for (i = 0; i < ns->nlbaf; i++) {
        NvmeLBAF *lbaf = &id_ns->lbaf[i];
        if (lbaf->ds == ds) {
            if (lbaf->ms == ms) {
                id_ns->flbas |= i;
                goto lbaf_found;
            }
        }
    }

    /* add non-standard lba format */
    id_ns->lbaf[ns->nlbaf].ds = ds;
    id_ns->lbaf[ns->nlbaf].ms = ms;
    ns->nlbaf++;

    id_ns->flbas |= i;

lbaf_found:
    id_ns_nvm->elbaf[i] = (ns->pif & 0x3) << 7;
    id_ns->nlbaf = ns->nlbaf - 1;
    nvme_ns_init_format(ns);

    return 0;
}
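/*
 * Illustrative example (assumed values): with logical_block_size=4096 and
 * ms=16, ds = 31 - clz32(4096) = 12, which matches defaults[6] (ds=12,
 * ms=16); FLBAS then selects index 6 and no non-standard format is appended.
 */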
static int nvme_ns_init_blk(NvmeNamespace *ns, Error **errp)
{
    bool read_only;

    if (!blkconf_blocksizes(&ns->blkconf, errp)) {
        return -1;
    }

    read_only = !blk_supports_write_perm(ns->blkconf.blk);
    if (!blkconf_apply_backend_options(&ns->blkconf, read_only, false, errp)) {
        return -1;
    }

    if (ns->blkconf.discard_granularity == -1) {
        ns->blkconf.discard_granularity =
            MAX(ns->blkconf.logical_block_size, MIN_DISCARD_GRANULARITY);
    }

    ns->size = blk_getlength(ns->blkconf.blk);
    if (ns->size < 0) {
        error_setg_errno(errp, -ns->size, "could not get blockdev size");
        return -1;
    }

    return 0;
}
static int nvme_ns_zoned_check_calc_geometry(NvmeNamespace *ns, Error **errp)
{
    uint64_t zone_size, zone_cap;

    /* Make sure that the values of ZNS properties are sane */
    if (ns->params.zone_size_bs) {
        zone_size = ns->params.zone_size_bs;
    } else {
        zone_size = NVME_DEFAULT_ZONE_SIZE;
    }

    if (ns->params.zone_cap_bs) {
        zone_cap = ns->params.zone_cap_bs;
    } else {
        zone_cap = zone_size;
    }

    if (zone_cap > zone_size) {
        error_setg(errp, "zone capacity %"PRIu64"B exceeds "
                   "zone size %"PRIu64"B", zone_cap, zone_size);
        return -1;
    }

    if (zone_size < ns->lbasz) {
        error_setg(errp, "zone size %"PRIu64"B too small, "
                   "must be at least %zuB", zone_size, ns->lbasz);
        return -1;
    }

    if (zone_cap < ns->lbasz) {
        error_setg(errp, "zone capacity %"PRIu64"B too small, "
                   "must be at least %zuB", zone_cap, ns->lbasz);
        return -1;
    }

    /*
     * Save the main zone geometry values to avoid
     * calculating them later again.
     */
    ns->zone_size = zone_size / ns->lbasz;
    ns->zone_capacity = zone_cap / ns->lbasz;
    ns->num_zones = le64_to_cpu(ns->id_ns.nsze) / ns->zone_size;

    /* Do a few more sanity checks of ZNS properties */
    if (!ns->num_zones) {
        error_setg(errp,
                   "insufficient drive capacity, must be at least the size "
                   "of one zone (%"PRIu64"B)", zone_size);
        return -1;
    }

    return 0;
}
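/*
 * Worked example (illustrative): with the default 128 MiB zone size and
 * 512 byte LBAs, zone_size becomes 128 MiB / 512 B = 262144 LBAs, and a
 * 1 GiB backing image yields num_zones = 2097152 / 262144 = 8.
 */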
static void nvme_ns_zoned_init_state(NvmeNamespace *ns)
{
    uint64_t start = 0, zone_size = ns->zone_size;
    uint64_t capacity = ns->num_zones * zone_size;
    NvmeZone *zone;
    int i;

    ns->zone_array = g_new0(NvmeZone, ns->num_zones);
    if (ns->params.zd_extension_size) {
        ns->zd_extensions = g_malloc0(ns->params.zd_extension_size *
                                      ns->num_zones);
    }

    QTAILQ_INIT(&ns->exp_open_zones);
    QTAILQ_INIT(&ns->imp_open_zones);
    QTAILQ_INIT(&ns->closed_zones);
    QTAILQ_INIT(&ns->full_zones);

    zone = ns->zone_array;
    for (i = 0; i < ns->num_zones; i++, zone++) {
        if (start + zone_size > capacity) {
            zone_size = capacity - start;
        }
        zone->d.zt = NVME_ZONE_TYPE_SEQ_WRITE;
        nvme_set_zone_state(zone, NVME_ZONE_STATE_EMPTY);
        zone->d.za = 0;
        zone->d.zcap = ns->zone_capacity;
        zone->d.zslba = start;
        zone->d.wp = start;
        zone->w_ptr = start;
        start += zone_size;
    }

    ns->zone_size_log2 = 0;
    if (is_power_of_2(ns->zone_size)) {
        ns->zone_size_log2 = 63 - clz64(ns->zone_size);
    }
}
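/*
 * 63 - clz64(x) computes floor(log2(x)), so for power-of-2 zone sizes the
 * cached zone_size_log2 lets callers derive a zone index with a shift
 * rather than a 64-bit division (the usual reason for caching a log2).
 */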
static void nvme_ns_init_zoned(NvmeNamespace *ns)
{
    NvmeIdNsZoned *id_ns_z;
    int i;

    nvme_ns_zoned_init_state(ns);

    id_ns_z = g_new0(NvmeIdNsZoned, 1);

    /* MAR/MOR are zeroes-based, FFFFFFFFFh means no limit */
    id_ns_z->mar = cpu_to_le32(ns->params.max_active_zones - 1);
    id_ns_z->mor = cpu_to_le32(ns->params.max_open_zones - 1);
    id_ns_z->zoc = 0;
    id_ns_z->ozcs = ns->params.cross_zone_read ?
        NVME_ID_NS_ZONED_OZCS_RAZB : 0x00;

    for (i = 0; i <= ns->id_ns.nlbaf; i++) {
        id_ns_z->lbafe[i].zsze = cpu_to_le64(ns->zone_size);
        id_ns_z->lbafe[i].zdes =
            ns->params.zd_extension_size >> 6; /* Units of 64B */
    }

    if (ns->params.zrwas) {
        ns->zns.numzrwa = ns->params.numzrwa ?
            ns->params.numzrwa : ns->num_zones;

        ns->zns.zrwas = ns->params.zrwas >> ns->lbaf.ds;
        ns->zns.zrwafg = ns->params.zrwafg >> ns->lbaf.ds;

        id_ns_z->ozcs |= NVME_ID_NS_ZONED_OZCS_ZRWASUP;
        id_ns_z->zrwacap = NVME_ID_NS_ZONED_ZRWACAP_EXPFLUSHSUP;

        id_ns_z->numzrwa = cpu_to_le32(ns->params.numzrwa);
        id_ns_z->zrwas = cpu_to_le16(ns->zns.zrwas);
        id_ns_z->zrwafg = cpu_to_le16(ns->zns.zrwafg);
    }

    id_ns_z->ozcs = cpu_to_le16(id_ns_z->ozcs);

    ns->csi = NVME_CSI_ZONED;
    ns->id_ns.nsze = cpu_to_le64(ns->num_zones * ns->zone_size);
    ns->id_ns.ncap = ns->id_ns.nsze;
    ns->id_ns.nuse = ns->id_ns.ncap;

    /*
     * The device uses the BDRV_BLOCK_ZERO flag to determine the "deallocated"
     * status of logical blocks. Since the spec defines that logical blocks
     * SHALL be deallocated when the zone is in the Empty or Offline states,
     * we can only support DULBE if the zone size is a multiple of the
     * calculated NPDG.
     */
    if (ns->zone_size % (ns->id_ns.npdg + 1)) {
        warn_report("the zone size (%"PRIu64" blocks) is not a multiple of "
                    "the calculated deallocation granularity (%d blocks); "
                    "DULBE support disabled",
                    ns->zone_size, ns->id_ns.npdg + 1);

        ns->id_ns.nsfeat &= ~0x4;
    }

    ns->id_ns_zoned = id_ns_z;
}
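/*
 * Note the zeroes-based encoding above: with max_active_zones=0 (unlimited),
 * cpu_to_le32(0 - 1) stores 0xffffffff, which the spec defines as no limit.
 */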
static void nvme_clear_zone(NvmeNamespace *ns, NvmeZone *zone)
{
    uint8_t state;

    zone->w_ptr = zone->d.wp;
    state = nvme_get_zone_state(zone);
    if (zone->d.wp != zone->d.zslba ||
        (zone->d.za & NVME_ZA_ZD_EXT_VALID)) {
        if (state != NVME_ZONE_STATE_CLOSED) {
            trace_pci_nvme_clear_ns_close(state, zone->d.zslba);
            nvme_set_zone_state(zone, NVME_ZONE_STATE_CLOSED);
        }
        nvme_aor_inc_active(ns);
        QTAILQ_INSERT_HEAD(&ns->closed_zones, zone, entry);
    } else {
        trace_pci_nvme_clear_ns_reset(state, zone->d.zslba);
        if (zone->d.za & NVME_ZA_ZRWA_VALID) {
            zone->d.za &= ~NVME_ZA_ZRWA_VALID;
            ns->zns.numzrwa++;
        }
        nvme_set_zone_state(zone, NVME_ZONE_STATE_EMPTY);
    }
}
/*
 * Close all the zones that are currently open.
 */
static void nvme_zoned_ns_shutdown(NvmeNamespace *ns)
{
    NvmeZone *zone, *next;

    QTAILQ_FOREACH_SAFE(zone, &ns->closed_zones, entry, next) {
        QTAILQ_REMOVE(&ns->closed_zones, zone, entry);
        nvme_aor_dec_active(ns);
        nvme_clear_zone(ns, zone);
    }
    QTAILQ_FOREACH_SAFE(zone, &ns->imp_open_zones, entry, next) {
        QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
        nvme_aor_dec_open(ns);
        nvme_aor_dec_active(ns);
        nvme_clear_zone(ns, zone);
    }
    QTAILQ_FOREACH_SAFE(zone, &ns->exp_open_zones, entry, next) {
        QTAILQ_REMOVE(&ns->exp_open_zones, zone, entry);
        nvme_aor_dec_open(ns);
        nvme_aor_dec_active(ns);
        nvme_clear_zone(ns, zone);
    }

    assert(ns->nr_open_zones == 0);
}
static NvmeRuHandle *nvme_find_ruh_by_attr(NvmeEnduranceGroup *endgrp,
                                           uint8_t ruha, uint16_t *ruhid)
{
    for (uint16_t i = 0; i < endgrp->fdp.nruh; i++) {
        NvmeRuHandle *ruh = &endgrp->fdp.ruhs[i];

        if (ruh->ruha == ruha) {
            *ruhid = i;
            return ruh;
        }
    }

    return NULL;
}
static bool nvme_ns_init_fdp(NvmeNamespace *ns, Error **errp)
{
    NvmeEnduranceGroup *endgrp = ns->endgrp;
    NvmeRuHandle *ruh;
    uint8_t lbafi = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    g_autofree unsigned int *ruhids = NULL;
    unsigned int n, m, *ruhid;
    const char *endptr, *token;
    char *r, *p;
    uint16_t *ph;

    if (!ns->params.fdp.ruhs) {
        ns->fdp.nphs = 1;
        ph = ns->fdp.phs = g_new(uint16_t, 1);

        ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_CTRL, ph);
        if (!ruh) {
            ruh = nvme_find_ruh_by_attr(endgrp, NVME_RUHA_UNUSED, ph);
            if (!ruh) {
                error_setg(errp, "no unused reclaim unit handles left");
                return false;
            }

            ruh->ruha = NVME_RUHA_CTRL;
            ruh->lbafi = lbafi;
            ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds;

            for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) {
                ruh->rus[rg].ruamw = ruh->ruamw;
            }
        } else if (ruh->lbafi != lbafi) {
            error_setg(errp, "lba format index of controller assigned "
                       "reclaim unit handle does not match namespace lba "
                       "format index");
            return false;
        }

        return true;
    }

    ruhid = ruhids = g_new0(unsigned int, endgrp->fdp.nruh);
    r = p = strdup(ns->params.fdp.ruhs);

    /* parse the placement handle identifiers */
    while ((token = qemu_strsep(&p, ";")) != NULL) {
        if (qemu_strtoui(token, &endptr, 0, &n) < 0) {
            error_setg(errp, "cannot parse reclaim unit handle identifier");
            free(r);
            return false;
        }

        m = n;

        /* parse range */
        if (*endptr == '-') {
            token = endptr + 1;

            if (qemu_strtoui(token, NULL, 0, &m) < 0) {
                error_setg(errp, "cannot parse reclaim unit handle identifier");
                free(r);
                return false;
            }

            if (m < n) {
                error_setg(errp, "invalid reclaim unit handle identifier range");
                free(r);
                return false;
            }
        }

        for (; n <= m; n++) {
            if (ns->fdp.nphs++ == endgrp->fdp.nruh) {
                error_setg(errp, "too many placement handles");
                free(r);
                return false;
            }

            *ruhid++ = n;
        }
    }

    free(r);

    /* verify that the ruhids are unique */
    for (unsigned int i = 0; i < ns->fdp.nphs; i++) {
        for (unsigned int j = i + 1; j < ns->fdp.nphs; j++) {
            if (ruhids[i] == ruhids[j]) {
                error_setg(errp, "duplicate reclaim unit handle identifier: %u",
                           ruhids[i]);
                return false;
            }
        }
    }

    ph = ns->fdp.phs = g_new(uint16_t, ns->fdp.nphs);

    ruhid = ruhids;

    /* verify the identifiers */
    for (unsigned int i = 0; i < ns->fdp.nphs; i++, ruhid++, ph++) {
        if (*ruhid >= endgrp->fdp.nruh) {
            error_setg(errp, "invalid reclaim unit handle identifier");
            return false;
        }

        ruh = &endgrp->fdp.ruhs[*ruhid];

        switch (ruh->ruha) {
        case NVME_RUHA_UNUSED:
            ruh->ruha = NVME_RUHA_HOST;
            ruh->lbafi = lbafi;
            ruh->ruamw = endgrp->fdp.runs >> ns->lbaf.ds;

            for (uint16_t rg = 0; rg < endgrp->fdp.nrg; rg++) {
                ruh->rus[rg].ruamw = ruh->ruamw;
            }

            break;

        case NVME_RUHA_HOST:
            if (ruh->lbafi != lbafi) {
                error_setg(errp, "lba format index of host assigned "
                           "reclaim unit handle does not match namespace "
                           "lba format index");
                return false;
            }

            break;

        case NVME_RUHA_CTRL:
            error_setg(errp, "reclaim unit handle is controller assigned");
            return false;

        default:
            abort();
        }

        *ph = *ruhid;
    }

    return true;
}
static int nvme_ns_check_constraints(NvmeNamespace *ns, Error **errp)
{
    unsigned int pi_size;

    if (!ns->blkconf.blk) {
        error_setg(errp, "block backend not configured");
        return -1;
    }

    if (ns->params.pi) {
        if (ns->params.pi > NVME_ID_NS_DPS_TYPE_3) {
            error_setg(errp, "invalid 'pi' value");
            return -1;
        }

        switch (ns->params.pif) {
        case NVME_PI_GUARD_16:
            pi_size = 8;
            break;
        case NVME_PI_GUARD_64:
            pi_size = 16;
            break;
        default:
            error_setg(errp, "invalid 'pif'");
            return -1;
        }

        if (ns->params.ms < pi_size) {
            error_setg(errp, "at least %u bytes of metadata required to "
                       "enable protection information", pi_size);
            return -1;
        }
    }

    if (ns->params.nsid > NVME_MAX_NAMESPACES) {
        error_setg(errp, "invalid namespace id (must be between 0 and %d)",
                   NVME_MAX_NAMESPACES);
        return -1;
    }

    if (ns->params.zoned && ns->endgrp && ns->endgrp->fdp.enabled) {
        error_setg(errp, "cannot be a zoned namespace in an FDP configuration");
        return -1;
    }

    if (ns->params.zoned) {
        if (ns->params.max_active_zones) {
            if (ns->params.max_open_zones > ns->params.max_active_zones) {
                error_setg(errp, "max_open_zones (%u) exceeds "
                           "max_active_zones (%u)", ns->params.max_open_zones,
                           ns->params.max_active_zones);
                return -1;
            }

            if (!ns->params.max_open_zones) {
                ns->params.max_open_zones = ns->params.max_active_zones;
            }
        }

        if (ns->params.zd_extension_size) {
            if (ns->params.zd_extension_size & 0x3f) {
                error_setg(errp, "zone descriptor extension size must be a "
                           "multiple of 64B");
                return -1;
            }
            if ((ns->params.zd_extension_size >> 6) > 0xff) {
                error_setg(errp,
                           "zone descriptor extension size is too large");
                return -1;
            }
        }

        if (ns->params.zrwas) {
            if (ns->params.zrwas % ns->blkconf.logical_block_size) {
                error_setg(errp, "zone random write area size (zoned.zrwas "
                           "%"PRIu64") must be a multiple of the logical "
                           "block size (logical_block_size %"PRIu32")",
                           ns->params.zrwas, ns->blkconf.logical_block_size);
                return -1;
            }

            if (ns->params.zrwafg == -1) {
                ns->params.zrwafg = ns->blkconf.logical_block_size;
            }

            if (ns->params.zrwas % ns->params.zrwafg) {
                error_setg(errp, "zone random write area size (zoned.zrwas "
                           "%"PRIu64") must be a multiple of the zone random "
                           "write area flush granularity (zoned.zrwafg, "
                           "%"PRIu64")", ns->params.zrwas, ns->params.zrwafg);
                return -1;
            }

            if (ns->params.max_active_zones) {
                if (ns->params.numzrwa > ns->params.max_active_zones) {
                    error_setg(errp, "number of zone random write area "
                               "resources (zoned.numzrwa, %d) must be less "
                               "than or equal to maximum active resources "
                               "(zoned.max_active_zones, %d)",
                               ns->params.numzrwa,
                               ns->params.max_active_zones);
                    return -1;
                }
            }
        }
    }

    return 0;
}
int nvme_ns_setup(NvmeNamespace *ns, Error **errp)
{
    if (nvme_ns_check_constraints(ns, errp)) {
        return -1;
    }

    if (nvme_ns_init_blk(ns, errp)) {
        return -1;
    }

    if (nvme_ns_init(ns, errp)) {
        return -1;
    }

    if (ns->params.zoned) {
        if (nvme_ns_zoned_check_calc_geometry(ns, errp) != 0) {
            return -1;
        }
        nvme_ns_init_zoned(ns);
    }

    if (ns->endgrp && ns->endgrp->fdp.enabled) {
        if (!nvme_ns_init_fdp(ns, errp)) {
            return -1;
        }
    }

    return 0;
}
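/*
 * Note the ordering above: constraints are checked before the block backend
 * is probed, and the zoned/FDP initializers run only after nvme_ns_init(),
 * since they derive their geometry from the already computed id_ns.nsze.
 */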
void nvme_ns_drain(NvmeNamespace *ns)
{
    blk_drain(ns->blkconf.blk);
}
void nvme_ns_shutdown(NvmeNamespace *ns)
{
    blk_flush(ns->blkconf.blk);
    if (ns->params.zoned) {
        nvme_zoned_ns_shutdown(ns);
    }
}
void nvme_ns_cleanup(NvmeNamespace *ns)
{
    if (ns->params.zoned) {
        g_free(ns->id_ns_zoned);
        g_free(ns->zone_array);
        g_free(ns->zd_extensions);
    }

    if (ns->endgrp && ns->endgrp->fdp.enabled) {
        g_free(ns->fdp.phs);
    }
}
static void nvme_ns_unrealize(DeviceState *dev)
{
    NvmeNamespace *ns = NVME_NS(dev);

    nvme_ns_drain(ns);
    nvme_ns_shutdown(ns);
    nvme_ns_cleanup(ns);
}
static void nvme_ns_realize(DeviceState *dev, Error **errp)
{
    NvmeNamespace *ns = NVME_NS(dev);
    BusState *s = qdev_get_parent_bus(dev);
    NvmeCtrl *n = NVME(s->parent);
    NvmeSubsystem *subsys = n->subsys;
    uint32_t nsid = ns->params.nsid;
    int i;

    if (!subsys) {
        /* If no subsys, the ns cannot be attached to more than one ctrl. */
        ns->params.shared = false;
        if (ns->params.detached) {
            error_setg(errp, "detached requires that the nvme device is "
                       "linked to an nvme-subsys device");
            return;
        }
    } else {
        /*
         * If this namespace belongs to a subsystem (through a link on the
         * controller device), reparent the device.
         */
        if (!qdev_set_parent_bus(dev, &subsys->bus.parent_bus, errp)) {
            return;
        }
        ns->subsys = subsys;
        ns->endgrp = &subsys->endgrp;
    }

    if (nvme_ns_setup(ns, errp)) {
        return;
    }

    if (!nsid) {
        for (i = 1; i <= NVME_MAX_NAMESPACES; i++) {
            if (nvme_ns(n, i) || nvme_subsys_ns(subsys, i)) {
                continue;
            }

            nsid = ns->params.nsid = i;
            break;
        }

        if (!nsid) {
            error_setg(errp, "no free namespace id");
            return;
        }
    } else {
        if (nvme_ns(n, nsid) || nvme_subsys_ns(subsys, nsid)) {
            error_setg(errp, "namespace id '%d' already allocated", nsid);
            return;
        }
    }

    if (subsys) {
        subsys->namespaces[nsid] = ns;

        ns->id_ns.endgid = cpu_to_le16(0x1);

        if (ns->params.detached) {
            return;
        }

        if (ns->params.shared) {
            for (i = 0; i < ARRAY_SIZE(subsys->ctrls); i++) {
                NvmeCtrl *ctrl = subsys->ctrls[i];

                if (ctrl && ctrl != SUBSYS_SLOT_RSVD) {
                    nvme_attach_ns(ctrl, ns);
                }
            }

            return;
        }
    }

    nvme_attach_ns(n, ns);
}
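/*
 * Note: nsid=0 (the property default) requests automatic allocation; the
 * loop above picks the first identifier that is free on both the controller
 * and the subsystem.
 */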
static Property nvme_ns_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeNamespace, blkconf),
    DEFINE_PROP_BOOL("detached", NvmeNamespace, params.detached, false),
    DEFINE_PROP_BOOL("shared", NvmeNamespace, params.shared, true),
    DEFINE_PROP_UINT32("nsid", NvmeNamespace, params.nsid, 0),
    DEFINE_PROP_UUID_NODEFAULT("uuid", NvmeNamespace, params.uuid),
    DEFINE_PROP_UINT64("eui64", NvmeNamespace, params.eui64, 0),
    DEFINE_PROP_UINT16("ms", NvmeNamespace, params.ms, 0),
    DEFINE_PROP_UINT8("mset", NvmeNamespace, params.mset, 0),
    DEFINE_PROP_UINT8("pi", NvmeNamespace, params.pi, 0),
    DEFINE_PROP_UINT8("pil", NvmeNamespace, params.pil, 0),
    DEFINE_PROP_UINT8("pif", NvmeNamespace, params.pif, 0),
    DEFINE_PROP_UINT16("mssrl", NvmeNamespace, params.mssrl, 128),
    DEFINE_PROP_UINT32("mcl", NvmeNamespace, params.mcl, 128),
    DEFINE_PROP_UINT8("msrc", NvmeNamespace, params.msrc, 127),
    DEFINE_PROP_BOOL("zoned", NvmeNamespace, params.zoned, false),
    DEFINE_PROP_SIZE("zoned.zone_size", NvmeNamespace, params.zone_size_bs,
                     NVME_DEFAULT_ZONE_SIZE),
    DEFINE_PROP_SIZE("zoned.zone_capacity", NvmeNamespace, params.zone_cap_bs,
                     0),
    DEFINE_PROP_BOOL("zoned.cross_read", NvmeNamespace,
                     params.cross_zone_read, false),
    DEFINE_PROP_UINT32("zoned.max_active", NvmeNamespace,
                       params.max_active_zones, 0),
    DEFINE_PROP_UINT32("zoned.max_open", NvmeNamespace,
                       params.max_open_zones, 0),
    DEFINE_PROP_UINT32("zoned.descr_ext_size", NvmeNamespace,
                       params.zd_extension_size, 0),
    DEFINE_PROP_UINT32("zoned.numzrwa", NvmeNamespace, params.numzrwa, 0),
    DEFINE_PROP_SIZE("zoned.zrwas", NvmeNamespace, params.zrwas, 0),
    DEFINE_PROP_SIZE("zoned.zrwafg", NvmeNamespace, params.zrwafg, -1),
    DEFINE_PROP_BOOL("eui64-default", NvmeNamespace, params.eui64_default,
                     false),
    DEFINE_PROP_STRING("fdp.ruhs", NvmeNamespace, params.fdp.ruhs),
    DEFINE_PROP_END_OF_LIST(),
};
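/*
 * Illustrative command line (a sketch; the drive id and image name are
 * hypothetical):
 *
 *   -drive id=nvm,file=nvm.img,format=raw,if=none
 *   -device nvme-ns,drive=nvm,nsid=1,zoned=true,zoned.zone_size=128M,
 *           zoned.max_open=16,zoned.max_active=32
 */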
static void nvme_ns_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);

    dc->bus_type = TYPE_NVME_BUS;
    dc->realize = nvme_ns_realize;
    dc->unrealize = nvme_ns_unrealize;
    device_class_set_props(dc, nvme_ns_props);
    dc->desc = "Virtual NVMe namespace";
}
static void nvme_ns_instance_init(Object *obj)
{
    NvmeNamespace *ns = NVME_NS(obj);
    char *bootindex = g_strdup_printf("/namespace@%d,0", ns->params.nsid);

    device_add_bootindex_property(obj, &ns->bootindex, "bootindex",
                                  bootindex, DEVICE(obj));

    g_free(bootindex);
}
static const TypeInfo nvme_ns_info = {
    .name = TYPE_NVME_NS,
    .parent = TYPE_DEVICE,
    .class_init = nvme_ns_class_init,
    .instance_size = sizeof(NvmeNamespace),
    .instance_init = nvme_ns_instance_init,
};
static void nvme_ns_register_types(void)
{
    type_register_static(&nvme_ns_info);
}

type_init(nvme_ns_register_types)