/*
 * QEMU NVM Express End-to-End Data Protection support
 *
 * Copyright (c) 2021 Samsung Electronics Co., Ltd.
 *
 * Authors:
 *   Klaus Jensen           <k.jensen@samsung.com>
 *   Gollu Appalanaidu      <anaidu.gollu@samsung.com>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"

#include "nvme.h"
#include "trace.h"

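/*
 * Validate the protection information settings of a command. For Type 1
 * protection, checking the reference tag requires that it matches the
 * least significant bits of the starting LBA; Type 3 protection does not
 * define reference tag checking at all.
 */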
uint16_t nvme_check_prinfo(NvmeNamespace *ns, uint8_t prinfo, uint64_t slba,
                           uint64_t reftag)
{
    uint64_t mask = ns->pif ? 0xffffffffffff : 0xffffffff;

    if ((NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) == NVME_ID_NS_DPS_TYPE_1) &&
        (prinfo & NVME_PRINFO_PRCHK_REF) && (slba & mask) != reftag) {
        return NVME_INVALID_PROT_INFO | NVME_DNR;
    }

    if ((NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) == NVME_ID_NS_DPS_TYPE_3) &&
        (prinfo & NVME_PRINFO_PRCHK_REF)) {
        return NVME_INVALID_PROT_INFO;
    }

    return NVME_SUCCESS;
}

/* from Linux kernel (crypto/crct10dif_common.c) */
static uint16_t crc16_t10dif(uint16_t crc, const unsigned char *buffer,
                             size_t len)
{
    size_t i;

    for (i = 0; i < len; i++) {
        crc = (crc << 8) ^ crc16_t10dif_table[((crc >> 8) ^ buffer[i]) & 0xff];
    }

    return crc;
}

/* from Linux kernel (lib/crc64.c) */
static uint64_t crc64_nvme(uint64_t crc, const unsigned char *buffer,
                           size_t len)
{
    size_t i;

    for (i = 0; i < len; i++) {
        crc = (crc >> 8) ^ crc64_nvme_table[(crc & 0xff) ^ buffer[i]];
    }

    return crc ^ (uint64_t)~0;
}

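/*
 * With PRACT set, the controller generates the protection information:
 * the guard is computed over the logical block data (and over the
 * metadata preceding the tuple when the tuple is placed at the end of
 * the metadata), the application and reference tags are taken from the
 * command, and the reference tag is incremented per block for Type 1
 * and Type 2 namespaces.
 */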
static void nvme_dif_pract_generate_dif_crc16(NvmeNamespace *ns, uint8_t *buf,
                                              size_t len, uint8_t *mbuf,
                                              size_t mlen, uint16_t apptag,
                                              uint64_t *reftag)
{
    uint8_t *end = buf + len;
    int16_t pil = 0;

    if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
        pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);
    }

    trace_pci_nvme_dif_pract_generate_dif_crc16(len, ns->lbasz,
                                                ns->lbasz + pil, apptag,
                                                *reftag);

    for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) {
        NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil);
        uint16_t crc = crc16_t10dif(0x0, buf, ns->lbasz);

        if (pil) {
            crc = crc16_t10dif(crc, mbuf, pil);
        }

        dif->g16.guard = cpu_to_be16(crc);
        dif->g16.apptag = cpu_to_be16(apptag);
        dif->g16.reftag = cpu_to_be32(*reftag);

        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) {
            (*reftag)++;
        }
    }
}

static void nvme_dif_pract_generate_dif_crc64(NvmeNamespace *ns, uint8_t *buf,
                                              size_t len, uint8_t *mbuf,
                                              size_t mlen, uint16_t apptag,
                                              uint64_t *reftag)
{
    uint8_t *end = buf + len;
    int16_t pil = 0;

    if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
        pil = ns->lbaf.ms - 16;
    }

    trace_pci_nvme_dif_pract_generate_dif_crc64(len, ns->lbasz,
                                                ns->lbasz + pil, apptag,
                                                *reftag);

    for (; buf < end; buf += ns->lbasz, mbuf += ns->lbaf.ms) {
        NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil);
        uint64_t crc = crc64_nvme(~0ULL, buf, ns->lbasz);

        if (pil) {
            crc = crc64_nvme(crc, mbuf, pil);
        }

        dif->g64.guard = cpu_to_be64(crc);
        dif->g64.apptag = cpu_to_be16(apptag);

        dif->g64.sr[0] = *reftag >> 40;
        dif->g64.sr[1] = *reftag >> 32;
        dif->g64.sr[2] = *reftag >> 24;
        dif->g64.sr[3] = *reftag >> 16;
        dif->g64.sr[4] = *reftag >> 8;
        dif->g64.sr[5] = *reftag;

        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) {
            (*reftag)++;
        }
    }
}

void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len,
                                 uint8_t *mbuf, size_t mlen, uint16_t apptag,
                                 uint64_t *reftag)
{
    switch (ns->pif) {
    case NVME_PI_GUARD_16:
        return nvme_dif_pract_generate_dif_crc16(ns, buf, len, mbuf, mlen,
                                                 apptag, reftag);
    case NVME_PI_GUARD_64:
        return nvme_dif_pract_generate_dif_crc64(ns, buf, len, mbuf, mlen,
                                                 apptag, reftag);
    }

    abort();
}

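/*
 * Per-block protection information checks. Checking is skipped for a
 * block whose tuple carries the escape values: an application tag of
 * 0xffff (and, for Type 3, also an all-ones reference tag).
 */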
static uint16_t nvme_dif_prchk_crc16(NvmeNamespace *ns, NvmeDifTuple *dif,
                                     uint8_t *buf, uint8_t *mbuf, size_t pil,
                                     uint8_t prinfo, uint16_t apptag,
                                     uint16_t appmask, uint64_t reftag)
{
    switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
    case NVME_ID_NS_DPS_TYPE_3:
        if (be32_to_cpu(dif->g16.reftag) != 0xffffffff) {
            break;
        }

        /* fallthrough */
    case NVME_ID_NS_DPS_TYPE_1:
    case NVME_ID_NS_DPS_TYPE_2:
        if (be16_to_cpu(dif->g16.apptag) != 0xffff) {
            break;
        }

        trace_pci_nvme_dif_prchk_disabled_crc16(be16_to_cpu(dif->g16.apptag),
                                                be32_to_cpu(dif->g16.reftag));

        return NVME_SUCCESS;
    }

    if (prinfo & NVME_PRINFO_PRCHK_GUARD) {
        uint16_t crc = crc16_t10dif(0x0, buf, ns->lbasz);

        if (pil) {
            crc = crc16_t10dif(crc, mbuf, pil);
        }

        trace_pci_nvme_dif_prchk_guard_crc16(be16_to_cpu(dif->g16.guard), crc);

        if (be16_to_cpu(dif->g16.guard) != crc) {
            return NVME_E2E_GUARD_ERROR;
        }
    }

    if (prinfo & NVME_PRINFO_PRCHK_APP) {
        trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->g16.apptag), apptag,
                                        appmask);

        if ((be16_to_cpu(dif->g16.apptag) & appmask) != (apptag & appmask)) {
            return NVME_E2E_APP_ERROR;
        }
    }

    if (prinfo & NVME_PRINFO_PRCHK_REF) {
        trace_pci_nvme_dif_prchk_reftag_crc16(be32_to_cpu(dif->g16.reftag),
                                              reftag);

        if (be32_to_cpu(dif->g16.reftag) != reftag) {
            return NVME_E2E_REF_ERROR;
        }
    }

    return NVME_SUCCESS;
}

static uint16_t nvme_dif_prchk_crc64(NvmeNamespace *ns, NvmeDifTuple *dif,
                                     uint8_t *buf, uint8_t *mbuf, size_t pil,
                                     uint8_t prinfo, uint16_t apptag,
                                     uint16_t appmask, uint64_t reftag)
{
    uint64_t r = 0;

    r |= (uint64_t)dif->g64.sr[0] << 40;
    r |= (uint64_t)dif->g64.sr[1] << 32;
    r |= (uint64_t)dif->g64.sr[2] << 24;
    r |= (uint64_t)dif->g64.sr[3] << 16;
    r |= (uint64_t)dif->g64.sr[4] << 8;
    r |= (uint64_t)dif->g64.sr[5];

    switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
    case NVME_ID_NS_DPS_TYPE_3:
        if (r != 0xffffffffffff) {
            break;
        }

        /* fallthrough */
    case NVME_ID_NS_DPS_TYPE_1:
    case NVME_ID_NS_DPS_TYPE_2:
        if (be16_to_cpu(dif->g64.apptag) != 0xffff) {
            break;
        }

        trace_pci_nvme_dif_prchk_disabled_crc64(be16_to_cpu(dif->g64.apptag),
                                                r);

        return NVME_SUCCESS;
    }

    if (prinfo & NVME_PRINFO_PRCHK_GUARD) {
        uint64_t crc = crc64_nvme(~0ULL, buf, ns->lbasz);

        if (pil) {
            crc = crc64_nvme(crc, mbuf, pil);
        }

        trace_pci_nvme_dif_prchk_guard_crc64(be64_to_cpu(dif->g64.guard), crc);

        if (be64_to_cpu(dif->g64.guard) != crc) {
            return NVME_E2E_GUARD_ERROR;
        }
    }

    if (prinfo & NVME_PRINFO_PRCHK_APP) {
        trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->g64.apptag), apptag,
                                        appmask);

        if ((be16_to_cpu(dif->g64.apptag) & appmask) != (apptag & appmask)) {
            return NVME_E2E_APP_ERROR;
        }
    }

    if (prinfo & NVME_PRINFO_PRCHK_REF) {
        trace_pci_nvme_dif_prchk_reftag_crc64(r, reftag);

        if (r != reftag) {
            return NVME_E2E_REF_ERROR;
        }
    }

    return NVME_SUCCESS;
}

static uint16_t nvme_dif_prchk(NvmeNamespace *ns, NvmeDifTuple *dif,
                               uint8_t *buf, uint8_t *mbuf, size_t pil,
                               uint8_t prinfo, uint16_t apptag,
                               uint16_t appmask, uint64_t reftag)
{
    switch (ns->pif) {
    case NVME_PI_GUARD_16:
        return nvme_dif_prchk_crc16(ns, dif, buf, mbuf, pil, prinfo, apptag,
                                    appmask, reftag);
    case NVME_PI_GUARD_64:
        return nvme_dif_prchk_crc64(ns, dif, buf, mbuf, pil, prinfo, apptag,
                                    appmask, reftag);
    }

    abort();
}

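/*
 * Verify the protection information of a buffer one logical block at a
 * time, advancing the expected reference tag for Type 1 and Type 2
 * namespaces.
 */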
uint16_t nvme_dif_check(NvmeNamespace *ns, uint8_t *buf, size_t len,
                        uint8_t *mbuf, size_t mlen, uint8_t prinfo,
                        uint64_t slba, uint16_t apptag,
                        uint16_t appmask, uint64_t *reftag)
{
    uint8_t *bufp, *end = buf + len;
    int16_t pil = 0;
    uint16_t status;

    status = nvme_check_prinfo(ns, prinfo, slba, *reftag);
    if (status) {
        return status;
    }

    if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
        pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);
    }

    trace_pci_nvme_dif_check(prinfo, ns->lbasz + pil);

    for (bufp = buf; bufp < end; bufp += ns->lbasz, mbuf += ns->lbaf.ms) {
        NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil);
        status = nvme_dif_prchk(ns, dif, bufp, mbuf, pil, prinfo, apptag,
                                appmask, *reftag);
        if (status) {
            /*
             * The first block of a 'raw' image is always allocated, so we
             * cannot reliably know if the block is all zeroes or not. For
             * CRC16 this works fine because the T10 CRC16 is 0x0 for all
             * zeroes, but the Rocksoft CRC64 is not. Thus, if a guard error
             * is detected for the first block, check if it is zeroed and
             * manually set the protection information to all ones to disable
             * protection information checking.
             */
            if (status == NVME_E2E_GUARD_ERROR && slba == 0x0 && bufp == buf) {
                g_autofree uint8_t *zeroes = g_malloc0(ns->lbasz);

                if (memcmp(bufp, zeroes, ns->lbasz) == 0) {
                    memset(mbuf + pil, 0xff, nvme_pi_tuple_size(ns));
                }
            } else {
                return status;
            }
        }

        if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps) != NVME_ID_NS_DPS_TYPE_3) {
            (*reftag)++;
        }
    }

    return NVME_SUCCESS;
}

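/*
 * For ranges of the backing image that read back as zeroes (e.g. blocks
 * that were never written), force the protection information bytes to all
 * ones so that subsequent checks treat those blocks as unprotected.
 */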
uint16_t nvme_dif_mangle_mdata(NvmeNamespace *ns, uint8_t *mbuf, size_t mlen,
                               uint64_t slba)
{
    BlockBackend *blk = ns->blkconf.blk;
    BlockDriverState *bs = blk_bs(blk);

    int64_t moffset = 0, offset = nvme_l2b(ns, slba);
    uint8_t *mbufp, *end;
    bool zeroed;
    int16_t pil = 0;
    int64_t bytes = (mlen / ns->lbaf.ms) << ns->lbaf.ds;
    int64_t pnum = 0;

    Error *err = NULL;

    if (!(ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT)) {
        pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);
    }

    do {
        int ret;

        bytes -= pnum;

        ret = bdrv_block_status(bs, offset, bytes, &pnum, NULL, NULL);
        if (ret < 0) {
            error_setg_errno(&err, -ret, "unable to get block status");
            error_report_err(err);

            return NVME_INTERNAL_DEV_ERROR;
        }

        zeroed = !!(ret & BDRV_BLOCK_ZERO);

        trace_pci_nvme_block_status(offset, bytes, pnum, ret, zeroed);

        if (zeroed) {
            mbufp = mbuf + moffset;
            mlen = (pnum >> ns->lbaf.ds) * ns->lbaf.ms;
            end = mbufp + mlen;

            for (; mbufp < end; mbufp += ns->lbaf.ms) {
                memset(mbufp + pil, 0xff, nvme_pi_tuple_size(ns));
            }
        }

        moffset += (pnum >> ns->lbaf.ds) * ns->lbaf.ms;
        offset += pnum;
    } while (pnum != bytes);

    return NVME_SUCCESS;
}

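/* Tear down the bounce buffers and complete the request. */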
static void nvme_dif_rw_cb(void *opaque, int ret)
{
    NvmeBounceContext *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;

    trace_pci_nvme_dif_rw_cb(nvme_cid(req), blk_name(blk));

    qemu_iovec_destroy(&ctx->data.iov);
    g_free(ctx->data.bounce);

    qemu_iovec_destroy(&ctx->mdata.iov);
    g_free(ctx->mdata.bounce);

    g_free(ctx);

    nvme_rw_complete_cb(req, ret);
}

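/*
 * Read completion: mangle the protection information of unwritten blocks,
 * verify the protection information, and copy the data (and, unless PRACT
 * is set and the metadata consists solely of protection information, the
 * metadata) back to the host.
 */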
static void nvme_dif_rw_check_cb(void *opaque, int ret)
{
    NvmeBounceContext *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    NvmeCtrl *n = nvme_ctrl(req);
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
    uint16_t apptag = le16_to_cpu(rw->apptag);
    uint16_t appmask = le16_to_cpu(rw->appmask);
    uint64_t reftag = le32_to_cpu(rw->reftag);
    uint64_t cdw3 = le32_to_cpu(rw->cdw3);
    uint16_t status;

    reftag |= cdw3 << 32;

    trace_pci_nvme_dif_rw_check_cb(nvme_cid(req), prinfo, apptag, appmask,
                                   reftag);

    if (ret) {
        goto out;
    }

    status = nvme_dif_mangle_mdata(ns, ctx->mdata.bounce, ctx->mdata.iov.size,
                                   slba);
    if (status) {
        req->status = status;
        goto out;
    }

    status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
                            ctx->mdata.bounce, ctx->mdata.iov.size, prinfo,
                            slba, apptag, appmask, &reftag);
    if (status) {
        req->status = status;
        goto out;
    }

    status = nvme_bounce_data(n, ctx->data.bounce, ctx->data.iov.size,
                              NVME_TX_DIRECTION_FROM_DEVICE, req);
    if (status) {
        req->status = status;
        goto out;
    }

    if (prinfo & NVME_PRINFO_PRACT && ns->lbaf.ms == nvme_pi_tuple_size(ns)) {
        goto out;
    }

    status = nvme_bounce_mdata(n, ctx->mdata.bounce, ctx->mdata.iov.size,
                               NVME_TX_DIRECTION_FROM_DEVICE, req);
    if (status) {
        req->status = status;
    }

out:
    nvme_dif_rw_cb(ctx, ret);
}

static void nvme_dif_rw_mdata_in_cb(void *opaque, int ret)
{
    NvmeBounceContext *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
    size_t mlen = nvme_m2b(ns, nlb);
    uint64_t offset = nvme_moff(ns, slba);
    BlockBackend *blk = ns->blkconf.blk;

    trace_pci_nvme_dif_rw_mdata_in_cb(nvme_cid(req), blk_name(blk));

    if (ret) {
        goto out;
    }

    ctx->mdata.bounce = g_malloc(mlen);

    qemu_iovec_reset(&ctx->mdata.iov);
    qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);

    req->aiocb = blk_aio_preadv(blk, offset, &ctx->mdata.iov, 0,
                                nvme_dif_rw_check_cb, ctx);
    return;

out:
    nvme_dif_rw_cb(ctx, ret);
}

static void nvme_dif_rw_mdata_out_cb(void *opaque, int ret)
{
    NvmeBounceContext *ctx = opaque;
    NvmeRequest *req = ctx->req;
    NvmeNamespace *ns = req->ns;
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint64_t offset = nvme_moff(ns, slba);
    BlockBackend *blk = ns->blkconf.blk;

    trace_pci_nvme_dif_rw_mdata_out_cb(nvme_cid(req), blk_name(blk));

    if (ret) {
        goto out;
    }

    req->aiocb = blk_aio_pwritev(blk, offset, &ctx->mdata.iov, 0,
                                 nvme_dif_rw_cb, ctx);
    return;

out:
    nvme_dif_rw_cb(ctx, ret);
}

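/*
 * Entry point for reads and writes on namespaces with protection
 * information enabled. Data and metadata are bounced through temporary
 * buffers so that protection information can be generated (PRACT) or
 * verified.
 */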
uint16_t nvme_dif_rw(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
    NvmeNamespace *ns = req->ns;
    BlockBackend *blk = ns->blkconf.blk;
    bool wrz = rw->opcode == NVME_CMD_WRITE_ZEROES;
    uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);
    size_t len = nvme_l2b(ns, nlb);
    size_t mlen = nvme_m2b(ns, nlb);
    size_t mapped_len = len;
    int64_t offset = nvme_l2b(ns, slba);
    uint8_t prinfo = NVME_RW_PRINFO(le16_to_cpu(rw->control));
    uint16_t apptag = le16_to_cpu(rw->apptag);
    uint16_t appmask = le16_to_cpu(rw->appmask);
    uint64_t reftag = le32_to_cpu(rw->reftag);
    uint64_t cdw3 = le32_to_cpu(rw->cdw3);
    bool pract = !!(prinfo & NVME_PRINFO_PRACT);
    NvmeBounceContext *ctx;
    uint16_t status;

    reftag |= cdw3 << 32;

    trace_pci_nvme_dif_rw(pract, prinfo);

    ctx = g_new0(NvmeBounceContext, 1);
    ctx->req = req;

    if (wrz) {
        BdrvRequestFlags flags = BDRV_REQ_MAY_UNMAP;

        if (prinfo & NVME_PRINFO_PRCHK_MASK) {
            status = NVME_INVALID_PROT_INFO | NVME_DNR;
            goto err;
        }

        if (pract) {
            uint8_t *mbuf, *end;
            int16_t pil = ns->lbaf.ms - nvme_pi_tuple_size(ns);

            status = nvme_check_prinfo(ns, prinfo, slba, reftag);
            if (status) {
                goto err;
            }

            ctx->mdata.bounce = g_malloc0(mlen);

            qemu_iovec_init(&ctx->mdata.iov, 1);
            qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);

            mbuf = ctx->mdata.bounce;
            end = mbuf + mlen;

            if (ns->id_ns.dps & NVME_ID_NS_DPS_FIRST_EIGHT) {
                pil = 0;
            }

            for (; mbuf < end; mbuf += ns->lbaf.ms) {
                NvmeDifTuple *dif = (NvmeDifTuple *)(mbuf + pil);

                switch (ns->pif) {
                case NVME_PI_GUARD_16:
                    dif->g16.apptag = cpu_to_be16(apptag);
                    dif->g16.reftag = cpu_to_be32(reftag);

                    break;

                case NVME_PI_GUARD_64:
                    dif->g64.guard = cpu_to_be64(0x6482d367eb22b64e);
                    dif->g64.apptag = cpu_to_be16(apptag);

                    dif->g64.sr[0] = reftag >> 40;
                    dif->g64.sr[1] = reftag >> 32;
                    dif->g64.sr[2] = reftag >> 24;
                    dif->g64.sr[3] = reftag >> 16;
                    dif->g64.sr[4] = reftag >> 8;
                    dif->g64.sr[5] = reftag;

                    break;

                default:
                    abort();
                }

                switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
                case NVME_ID_NS_DPS_TYPE_1:
                case NVME_ID_NS_DPS_TYPE_2:
                    reftag++;
                }
            }
        }

        req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len, flags,
                                           nvme_dif_rw_mdata_out_cb, ctx);
        return NVME_NO_COMPLETE;
    }

    if (nvme_ns_ext(ns) && !(pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) {
        mapped_len += mlen;
    }

    status = nvme_map_dptr(n, &req->sg, mapped_len, &req->cmd);
    if (status) {
        goto err;
    }

    ctx->data.bounce = g_malloc(len);

    qemu_iovec_init(&ctx->data.iov, 1);
    qemu_iovec_add(&ctx->data.iov, ctx->data.bounce, len);

    if (req->cmd.opcode == NVME_CMD_READ) {
        block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size,
                         BLOCK_ACCT_READ);

        req->aiocb = blk_aio_preadv(ns->blkconf.blk, offset, &ctx->data.iov, 0,
                                    nvme_dif_rw_mdata_in_cb, ctx);
        return NVME_NO_COMPLETE;
    }

    status = nvme_bounce_data(n, ctx->data.bounce, ctx->data.iov.size,
                              NVME_TX_DIRECTION_TO_DEVICE, req);
    if (status) {
        goto err;
    }

    ctx->mdata.bounce = g_malloc(mlen);

    qemu_iovec_init(&ctx->mdata.iov, 1);
    qemu_iovec_add(&ctx->mdata.iov, ctx->mdata.bounce, mlen);

    if (!(pract && ns->lbaf.ms == nvme_pi_tuple_size(ns))) {
        status = nvme_bounce_mdata(n, ctx->mdata.bounce, ctx->mdata.iov.size,
                                   NVME_TX_DIRECTION_TO_DEVICE, req);
        if (status) {
            goto err;
        }
    }

    status = nvme_check_prinfo(ns, prinfo, slba, reftag);
    if (status) {
        goto err;
    }

    if (pract) {
        /* splice generated protection information into the buffer */
        nvme_dif_pract_generate_dif(ns, ctx->data.bounce, ctx->data.iov.size,
                                    ctx->mdata.bounce, ctx->mdata.iov.size,
                                    apptag, &reftag);
    } else {
        status = nvme_dif_check(ns, ctx->data.bounce, ctx->data.iov.size,
                                ctx->mdata.bounce, ctx->mdata.iov.size, prinfo,
                                slba, apptag, appmask, &reftag);
        if (status) {
            goto err;
        }
    }

    block_acct_start(blk_get_stats(blk), &req->acct, ctx->data.iov.size,
                     BLOCK_ACCT_WRITE);

    req->aiocb = blk_aio_pwritev(ns->blkconf.blk, offset, &ctx->data.iov, 0,
                                 nvme_dif_rw_mdata_out_cb, ctx);

    return NVME_NO_COMPLETE;

err:
    qemu_iovec_destroy(&ctx->data.iov);
    g_free(ctx->data.bounce);

    qemu_iovec_destroy(&ctx->mdata.iov);
    g_free(ctx->mdata.bounce);

    g_free(ctx);

    return status;
}