/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "block/block_int.h"
#include "trace.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-ram-registrar.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "hw/virtio/virtio-blk.h"
#include "scsi/constants.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk-common.h"
#include "qemu/coroutine.h"
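/*
 * Note on the request layout (per the VIRTIO specification): each virtqueue
 * element carries a struct virtio_blk_outhdr (type, ioprio, sector) at the
 * front of its output segments, optional data payload segments in between,
 * and a one-byte struct virtio_blk_inhdr status field at the very end of the
 * last input segment. virtio_blk_handle_request() below peels the two
 * headers off with iov_discard_front_undoable()/iov_discard_back_undoable()
 * so that only the data payload remains in the iovecs.
 */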
static void virtio_blk_ioeventfd_attach(VirtIOBlock *s);
static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                    VirtIOBlockReq *req)
{
    req->dev = s;
    req->vq = vq;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
}

static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    g_free(req);
}
static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(vdev, req, status);

    stb_p(&req->in->status, status);
    iov_discard_undo(&req->inhdr_undo);
    iov_discard_undo(&req->outhdr_undo);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (qemu_in_iothread()) {
        virtio_notify_irqfd(vdev, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
                                      bool is_read, bool acct_failed)
{
    VirtIOBlock *s = req->dev;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->blk), &req->acct);
        }
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    /* Walk the chain of merged requests (linked through mr_next) */
    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure. If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration. While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }
}
static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    if (ret && virtio_blk_handle_rw_error(req, -ret, 0, true)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(s->blk), &req->acct);
    virtio_blk_free_request(req);
}
static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    if (is_write_zeroes) {
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    virtio_blk_free_request(req);
}
#ifdef __linux__

typedef struct {
    VirtIOBlockReq *req;
    struct sg_io_hdr hdr;
} VirtIOBlockIoctlReq;

static void virtio_blk_ioctl_complete(void *opaque, int status)
{
    VirtIOBlockIoctlReq *ioctl_req = opaque;
    VirtIOBlockReq *req = ioctl_req->req;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    struct virtio_scsi_inhdr *scsi;
    struct sg_io_hdr *hdr;

    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        virtio_stl_p(vdev, &scsi->errors, 255);
        goto out;
    }

    hdr = &ioctl_req->hdr;
    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred. However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    g_free(ioctl_req);
}
#endif
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}
static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
    VirtIOBlock *blk = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(blk);
    VirtQueueElement *elem = &req->elem;

#ifdef __linux__
    int i;
    VirtIOBlockIoctlReq *ioctl_req;
    BlockAIOCB *acb;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!virtio_has_feature(blk->host_features, VIRTIO_BLK_F_SCSI)) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirection commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    ioctl_req->req = req;
    ioctl_req->hdr.interface_id = 'S';
    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    ioctl_req->hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
        ioctl_req->hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        ioctl_req->hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
    }

    ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;

    acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
                        virtio_blk_ioctl_complete, ioctl_req);
    if (!acb) {
        g_free(ioctl_req);
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }
    return -EINPROGRESS;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    if (scsi) {
        virtio_stl_p(vdev, &scsi->errors, 255);
    }
    return status;
}
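/*
 * SCSI command passthrough (VIRTIO_BLK_T_SCSI_CMD) is a legacy-only feature:
 * VIRTIO_BLK_F_SCSI was removed from the VIRTIO 1.0 specification, which is
 * why virtio_blk_get_features() below rejects scsi=on together with
 * VIRTIO_F_VERSION_1, and why non-Linux hosts always answer
 * VIRTIO_BLK_S_UNSUPP here.
 */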
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;

    status = virtio_blk_handle_scsi_req(req);
    if (status != -EINPROGRESS) {
        virtio_blk_req_complete(req, status);
        virtio_blk_free_request(req);
    }
}
static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    BlockBackend *blk = s->blk;
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    bool is_write = mrb->is_write;
    BdrvRequestFlags flags = 0;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from external so we can't
         * modify it here. We need to initialize it locally and then add the
         * external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
        }

        trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
                                         mrb, start, num_reqs,
                                         sector_num << BDRV_SECTOR_BITS,
                                         qiov->size, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
        flags |= BDRV_REQ_REGISTERED_BUF;
    }

    if (is_write) {
        blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                        flags, virtio_blk_rw_complete,
                        mrb->reqs[start]);
    } else {
        blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                       flags, virtio_blk_rw_complete,
                       mrb->reqs[start]);
    }
}
static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}
static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(s, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in below situations:
             * 1. requests are not sequential
             * 2. merge would exceed maximum number of IOVs
             * 3. merge would exceed maximum transfer length of backend device
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                submit_requests(s, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
            start = i;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(s, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    VirtIOBlock *s = req->dev;

    block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(s, mrb);
    }
    blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
}
static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}
static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
    struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint64_t sector;
    uint32_t num_sectors, flags, max_sectors;
    uint8_t err_status;
    int bytes;

    sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
    num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
    flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
    max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
                  s->conf.max_discard_sectors;

    /*
     * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, so this check makes
     * sure that "num_sectors << BDRV_SECTOR_BITS" fits in the integer
     * variable.
     */
    if (unlikely(num_sectors > max_sectors)) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    bytes = num_sectors << BDRV_SECTOR_BITS;

    if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    /*
     * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
     * and write zeroes commands if any unknown flag is set.
     */
    if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
        err_status = VIRTIO_BLK_S_UNSUPP;
        goto err;
    }

    if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
        int blk_aio_flags = 0;

        if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
            blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
                         BLOCK_ACCT_WRITE);

        blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
                              bytes, blk_aio_flags,
                              virtio_blk_discard_write_zeroes_complete, req);
    } else { /* VIRTIO_BLK_T_DISCARD */
        /*
         * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
         * discard commands if the unmap flag is set.
         */
        if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
            err_status = VIRTIO_BLK_S_UNSUPP;
            goto err;
        }

        blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
                         virtio_blk_discard_write_zeroes_complete, req);
    }

    return VIRTIO_BLK_S_OK;

err:
    if (is_write_zeroes) {
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
    }
    return err_status;
}
typedef struct ZoneCmdData {
    VirtIOBlockReq *req;
    struct iovec *in_iov;
    unsigned in_num;
    union {
        struct {
            unsigned int nr_zones;
            BlockZoneDescriptor *zones;
        } zone_report_data;
        struct {
            int64_t offset;
        } zone_append_data;
    };
} ZoneCmdData;
/*
 * check_zoned_request: error checking before issuing requests. If all checks
 * passed, return true.
 * append: true when checking a zone append request.
 */
static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len,
                                bool append, uint8_t *status) {
    BlockDriverState *bs = blk_bs(s->blk);
    int index;

    if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) {
        *status = VIRTIO_BLK_S_UNSUPP;
        return false;
    }

    if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS)
        || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) {
        *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        return false;
    }

    if (append) {
        if (bs->bl.write_granularity) {
            if ((offset % bs->bl.write_granularity) != 0) {
                *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP;
                return false;
            }
        }

        index = offset / bs->bl.zone_size;
        if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) {
            *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            return false;
        }

        if (len / 512 > bs->bl.max_append_sectors) {
            if (bs->bl.max_append_sectors == 0) {
                *status = VIRTIO_BLK_S_UNSUPP;
            } else {
                *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            }
            return false;
        }
    }
    return true;
}
static void virtio_blk_zone_report_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    struct iovec *in_iov = data->in_iov;
    unsigned in_num = data->in_num;
    int64_t zrp_size, n, j = 0;
    int64_t nz = data->zone_report_data.nr_zones;
    int8_t err_status = VIRTIO_BLK_S_OK;
    struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
        .nr_zones = cpu_to_le64(nz),
    };

    trace_virtio_blk_zone_report_complete(vdev, req, nz, ret);
    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    zrp_size = sizeof(struct virtio_blk_zone_report)
               + sizeof(struct virtio_blk_zone_descriptor) * nz;
    n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr));
    if (n != sizeof(zrp_hdr)) {
        virtio_error(vdev, "Driver provided input buffer that is too small!");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    for (size_t i = sizeof(zrp_hdr); i < zrp_size;
         i += sizeof(struct virtio_blk_zone_descriptor), ++j) {
        struct virtio_blk_zone_descriptor desc =
            (struct virtio_blk_zone_descriptor) {
                .z_start = cpu_to_le64(data->zone_report_data.zones[j].start
                                       >> BDRV_SECTOR_BITS),
                .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap
                                     >> BDRV_SECTOR_BITS),
                .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp
                                    >> BDRV_SECTOR_BITS),
        };

        switch (data->zone_report_data.zones[j].type) {
        case BLK_ZT_CONV:
            desc.z_type = VIRTIO_BLK_ZT_CONV;
            break;
        case BLK_ZT_SWR:
            desc.z_type = VIRTIO_BLK_ZT_SWR;
            break;
        case BLK_ZT_SWP:
            desc.z_type = VIRTIO_BLK_ZT_SWP;
            break;
        default:
            g_assert_not_reached();
        }

        switch (data->zone_report_data.zones[j].state) {
        case BLK_ZS_RDONLY:
            desc.z_state = VIRTIO_BLK_ZS_RDONLY;
            break;
        case BLK_ZS_OFFLINE:
            desc.z_state = VIRTIO_BLK_ZS_OFFLINE;
            break;
        case BLK_ZS_EMPTY:
            desc.z_state = VIRTIO_BLK_ZS_EMPTY;
            break;
        case BLK_ZS_CLOSED:
            desc.z_state = VIRTIO_BLK_ZS_CLOSED;
            break;
        case BLK_ZS_FULL:
            desc.z_state = VIRTIO_BLK_ZS_FULL;
            break;
        case BLK_ZS_EOPEN:
            desc.z_state = VIRTIO_BLK_ZS_EOPEN;
            break;
        case BLK_ZS_IOPEN:
            desc.z_state = VIRTIO_BLK_ZS_IOPEN;
            break;
        case BLK_ZS_NOT_WP:
            desc.z_state = VIRTIO_BLK_ZS_NOT_WP;
            break;
        default:
            g_assert_not_reached();
        }

        /* TODO: it takes O(n^2) time complexity. Optimizations required. */
        n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc));
        if (n != sizeof(desc)) {
            virtio_error(vdev, "Driver provided input buffer "
                               "for descriptors that is too small!");
            err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        }
    }

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    g_free(data->zone_report_data.zones);
    g_free(data);
}
static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
                                          struct iovec *in_iov,
                                          unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    unsigned int nr_zones;
    ZoneCmdData *data;
    int64_t zone_size, offset;
    uint8_t err_status;

    if (req->in_len < sizeof(struct virtio_blk_inhdr) +
            sizeof(struct virtio_blk_zone_report) +
            sizeof(struct virtio_blk_zone_descriptor)) {
        virtio_error(vdev, "in buffer too small for zone report");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    /* start byte offset of the zone report */
    offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    if (!check_zoned_request(s, offset, 0, false, &err_status)) {
        goto out;
    }
    nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
                sizeof(struct virtio_blk_zone_report)) /
               sizeof(struct virtio_blk_zone_descriptor);
    trace_virtio_blk_handle_zone_report(vdev, req,
                                        offset >> BDRV_SECTOR_BITS, nr_zones);

    zone_size = sizeof(BlockZoneDescriptor) * nr_zones;
    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_report_data.nr_zones = nr_zones;
    data->zone_report_data.zones = g_malloc(zone_size);

    blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
                        data->zone_report_data.zones,
                        virtio_blk_zone_report_complete, data);
    return;

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
}
static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    int8_t err_status = VIRTIO_BLK_S_OK;

    trace_virtio_blk_zone_mgmt_complete(vdev, req, ret);

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
    }

    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
}
static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    BlockDriverState *bs = blk_bs(s->blk);
    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    uint64_t len;
    uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    uint32_t type = virtio_ldl_p(vdev, &req->out.type);
    if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) {
        /* Entire drive capacity */
        offset = 0;
        len = capacity;
        trace_virtio_blk_handle_zone_reset_all(vdev, req, 0,
                                               bs->total_sectors);
    } else {
        if (bs->bl.zone_size > capacity - offset) {
            /* The zoned device allows the last smaller zone. */
            len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1);
        } else {
            len = bs->bl.zone_size;
        }
        trace_virtio_blk_handle_zone_mgmt(vdev, req, op,
                                          offset >> BDRV_SECTOR_BITS,
                                          len >> BDRV_SECTOR_BITS);
    }

    if (!check_zoned_request(s, offset, len, false, &err_status)) {
        goto out;
    }

    blk_aio_zone_mgmt(s->blk, op, offset, len,
                      virtio_blk_zone_mgmt_complete, req);

    return 0;

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    return err_status;
}
static void virtio_blk_zone_append_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    int64_t append_sector, n;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    virtio_stq_p(vdev, &append_sector,
                 data->zone_append_data.offset >> BDRV_SECTOR_BITS);
    n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector,
                     sizeof(append_sector));
    if (n != sizeof(append_sector)) {
        virtio_error(vdev, "Driver provided input buffer less than size of "
                           "append_sector");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }
    trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret);

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    g_free(data);
}
static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
                                         struct iovec *out_iov,
                                         struct iovec *in_iov,
                                         uint64_t out_num,
                                         unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint8_t err_status = VIRTIO_BLK_S_OK;

    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    int64_t len = iov_size(out_iov, out_num);
    ZoneCmdData *data;

    trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
    if (!check_zoned_request(s, offset, len, true, &err_status)) {
        goto out;
    }

    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_append_data.offset = offset;
    qemu_iovec_init_external(&req->qiov, out_iov, out_num);

    block_acct_start(blk_get_stats(s->blk), &req->acct, len,
                     BLOCK_ACCT_ZONE_APPEND);

    blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0,
                        virtio_blk_zone_append_complete, data);
    return 0;

out:
    virtio_blk_req_complete(req, err_status);
    virtio_blk_free_request(req);
    return err_status;
}
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    uint32_t type;
    struct iovec *in_iov = req->elem.in_sg;
    struct iovec *out_iov = req->elem.out_sg;
    unsigned in_num = req->elem.in_num;
    unsigned out_num = req->elem.out_num;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        virtio_error(vdev, "virtio-blk missing headers");
        return -1;
    }

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        virtio_error(vdev, "virtio-blk request outhdr too short");
        return -1;
    }

    iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
                               &req->outhdr_undo);

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        virtio_error(vdev, "virtio-blk request inhdr too short");
        iov_discard_undo(&req->outhdr_undo);
        return -1;
    }

    /* We always touch the last byte, so just see how big in_iov is. */
    req->in_len = iov_size(in_iov, in_num);
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
                              &req->inhdr_undo);

    type = virtio_ldl_p(vdev, &req->out.type);

    /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
     * is an optional flag. Although a guest should not send this flag if
     * not negotiated we ignored it in the past. So keep ignoring it. */
    switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    case VIRTIO_BLK_T_IN:
    {
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = virtio_ldq_p(vdev, &req->out.sector);

        if (is_write) {
            qemu_iovec_init_external(&req->qiov, out_iov, out_num);
            trace_virtio_blk_handle_write(vdev, req, req->sector_num,
                                          req->qiov.size / BDRV_SECTOR_SIZE);
        } else {
            qemu_iovec_init_external(&req->qiov, in_iov, in_num);
            trace_virtio_blk_handle_read(vdev, req, req->sector_num,
                                         req->qiov.size / BDRV_SECTOR_SIZE);
        }

        if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
            block_acct_invalid(blk_get_stats(s->blk),
                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
            virtio_blk_free_request(req);
            return 0;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        /* merge would exceed maximum number of requests or IO direction
         * changes */
        if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                                  is_write != mrb->is_write ||
                                  !s->conf.request_merging)) {
            virtio_blk_submit_multireq(s, mrb);
        }

        assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
        mrb->reqs[mrb->num_reqs++] = req;
        mrb->is_write = is_write;
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        virtio_blk_handle_flush(req, mrb);
        break;
    case VIRTIO_BLK_T_ZONE_REPORT:
        virtio_blk_handle_zone_report(req, in_iov, in_num);
        break;
    case VIRTIO_BLK_T_ZONE_OPEN:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN);
        break;
    case VIRTIO_BLK_T_ZONE_CLOSE:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE);
        break;
    case VIRTIO_BLK_T_ZONE_FINISH:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH);
        break;
    case VIRTIO_BLK_T_ZONE_RESET:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_ZONE_RESET_ALL:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_SCSI_CMD:
        virtio_blk_handle_scsi(req);
        break;
    case VIRTIO_BLK_T_GET_ID:
    {
        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        const char *serial = s->conf.serial ? s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        virtio_blk_free_request(req);
        break;
    }
    case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
        /*
         * Pass the adjusted out_iov/out_num and in_iov/in_num; it is not
         * safe to access req->elem.out_sg directly because it was modified
         * by the header handling above.
         */
        virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num);
        break;
    /*
     * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
     * the VIRTIO_BLK_T_OUT flag set. The switch statement masked that flag
     * out, so check the original type here to make sure it was actually set.
     */
    case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
    case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
    {
        struct virtio_blk_discard_write_zeroes dwz_hdr;
        size_t out_len = iov_size(out_iov, out_num);
        bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
                               VIRTIO_BLK_T_WRITE_ZEROES;
        uint8_t err_status;

        /*
         * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
         * more than one segment.
         */
        if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
                     out_len > sizeof(dwz_hdr))) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
            virtio_blk_free_request(req);
            return 0;
        }

        if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
                                sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
            iov_discard_undo(&req->inhdr_undo);
            iov_discard_undo(&req->outhdr_undo);
            virtio_error(vdev, "virtio-blk discard/write_zeroes header"
                         " too short");
            return -1;
        }

        err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
                                                            is_write_zeroes);
        if (err_status != VIRTIO_BLK_S_OK) {
            virtio_blk_req_complete(req, err_status);
            virtio_blk_free_request(req);
        }

        break;
    }
    default:
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        virtio_blk_free_request(req);
    }
    return 0;
}
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};
    bool suppress_notifications = virtio_queue_get_notification(vq);

    defer_call_begin();

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_blk_get_request(s, vq))) {
            if (virtio_blk_handle_request(req, &mrb)) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                break;
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (!virtio_queue_empty(vq));

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    defer_call_end();
}
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    if (!s->ioeventfd_disabled && !s->ioeventfd_started) {
        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
         * ioeventfd here instead of waiting for .set_status().
         */
        virtio_device_start_ioeventfd(vdev);
        if (!s->ioeventfd_disabled) {
            return;
        }
    }

    virtio_blk_handle_vq(s, vq);
}
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev; /* we're called with at least one request */

    MultiReqBuffer mrb = {};

    while (req) {
        VirtIOBlockReq *next = req->next;
        if (virtio_blk_handle_request(req, &mrb)) {
            /* Device is now broken and won't do any processing until it gets
             * reset. Already queued requests will be lost: let's purge them.
             */
            while (req) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                req = next;
                next = req ? req->next : NULL;
            }
            break;
        }
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    /* Paired with inc in virtio_blk_dma_restart_cb() */
    blk_dec_in_flight(s->conf.conf.blk);
}
static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;
    uint16_t num_queues = s->conf.num_queues;
    g_autofree VirtIOBlockReq **vq_rq = NULL;
    VirtIOBlockReq *rq = NULL;

    if (!running) {
        return;
    }

    /* Split the device-wide s->rq request list into per-vq request lists */
    vq_rq = g_new0(VirtIOBlockReq *, num_queues);

    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        rq = s->rq;
        s->rq = NULL;
    }

    while (rq) {
        VirtIOBlockReq *next = rq->next;
        uint16_t idx = virtio_get_queue_index(rq->vq);

        /* Only num_queues vqs were created so vq_rq[idx] is within bounds */
        assert(idx < num_queues);
        rq->next = vq_rq[idx];
        vq_rq[idx] = rq;
        rq = next;
    }

    /* Schedule a BH to submit the requests in each vq's AioContext */
    for (uint16_t i = 0; i < num_queues; i++) {
        if (!vq_rq[i]) {
            continue;
        }

        /* Paired with dec in virtio_blk_dma_restart_bh() */
        blk_inc_in_flight(s->conf.conf.blk);

        aio_bh_schedule_oneshot(s->vq_aio_context[i],
                                virtio_blk_dma_restart_bh,
                                vq_rq[i]);
    }
}
*vdev
)
1241 VirtIOBlock
*s
= VIRTIO_BLK(vdev
);
1242 VirtIOBlockReq
*req
;
1244 /* Dataplane has stopped... */
1245 assert(!s
->ioeventfd_started
);
1247 /* ...but requests may still be in flight. */
1250 /* We drop queued requests after blk_drain() because blk_drain() itself can
1252 WITH_QEMU_LOCK_GUARD(&s
->rq_lock
) {
1257 /* No other threads can access req->vq here */
1258 virtqueue_detach_element(req
->vq
, &req
->elem
, 0);
1260 virtio_blk_free_request(req
);
1264 blk_set_enable_write_cache(s
->blk
, s
->original_wce
);
/* Coalesce internal state and copy it to PCI I/O region 0. */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    BlockDriverState *bs = blk_bs(s->blk);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int64_t length;
    int blk_size = conf->logical_block_size;

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    virtio_stl_p(vdev, &blkcfg.seg_max,
                 s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    length = blk_getlength(s->blk);
    if (length > 0 && length / conf->heads / conf->secs % blk_size) {
        blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    } else {
        blkcfg.geometry.sectors = conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = blk_enable_write_cache(s->blk);
    virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
        uint32_t discard_granularity = conf->discard_granularity;
        if (discard_granularity == -1 || !s->conf.report_discard_granularity) {
            discard_granularity = blk_size;
        }
        virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
                     s->conf.max_discard_sectors);
        virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
                     discard_granularity >> BDRV_SECTOR_BITS);
        /*
         * We support only one segment per request since multiple segments
         * are not widely used and there are no userspace APIs that allow
         * applications to submit multiple segments in a single call.
         */
        virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
    }
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
                     s->conf.max_write_zeroes_sectors);
        blkcfg.write_zeroes_may_unmap = 1;
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
    }
    if (bs->bl.zoned != BLK_Z_NONE) {
        switch (bs->bl.zoned) {
        case BLK_Z_HM:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HM;
            break;
        case BLK_Z_HA:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HA;
            break;
        default:
            g_assert_not_reached();
        }

        virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors,
                     bs->bl.zone_size / 512);
        virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones,
                     bs->bl.max_active_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones,
                     bs->bl.max_open_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size);
        virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors,
                     bs->bl.max_append_sectors);
    } else {
        blkcfg.zoned.model = VIRTIO_BLK_Z_NONE;
    }
    memcpy(config, &blkcfg, s->config_size);
}
static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, s->config_size);

    blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
}
static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    /* Firstly sync all virtio-blk possible supported features */
    features |= s->host_features;

    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
        if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_SCSI)) {
            error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
            return 0;
        }
    } else {
        virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
        virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
    }

    if (blk_enable_write_cache(s->blk) ||
        (s->conf.x_enable_wce_if_config_wce &&
         virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) {
        virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
    }
    if (!blk_is_writable(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    }
    if (s->conf.num_queues > 1) {
        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
    }

    return features;
}
static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
        assert(!s->ioeventfd_started);
    }

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes. Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->blk would erroneously be placed in writethrough mode.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
        blk_set_enable_write_cache(s->blk,
                                   virtio_vdev_has_feature(vdev,
                                                           VIRTIO_BLK_F_WCE));
    }
}
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        VirtIOBlockReq *req = s->rq;

        while (req) {
            qemu_put_sbyte(f, 1);

            if (s->conf.num_queues > 1) {
                qemu_put_be32(f, virtio_get_queue_index(req->vq));
            }

            qemu_put_virtqueue_element(vdev, f, &req->elem);
            req = req->next;
        }
    }

    qemu_put_sbyte(f, 0);
}
static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    while (qemu_get_sbyte(f)) {
        unsigned nvqs = s->conf.num_queues;
        unsigned vq_idx = 0;
        VirtIOBlockReq *req;

        if (nvqs > 1) {
            vq_idx = qemu_get_be32(f);

            if (vq_idx >= nvqs) {
                error_report("Invalid virtqueue index in request list: %#x",
                             vq_idx);
                return -EINVAL;
            }
        }

        req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
        virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    }

    return 0;
}
static void virtio_resize_cb(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    virtio_notify_config(vdev);
}

static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    /*
     * virtio_notify_config() needs to acquire the BQL,
     * so it can't be called from an iothread. Instead, schedule
     * it to be run in the main context BH.
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
}
static void virtio_blk_ioeventfd_detach(VirtIOBlock *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

static void virtio_blk_ioeventfd_attach(VirtIOBlock *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

/* Suspend virtqueue ioeventfd processing during drain */
static void virtio_blk_drained_begin(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (s->ioeventfd_started) {
        virtio_blk_ioeventfd_detach(s);
    }
}

/* Resume virtqueue ioeventfd processing after drain */
static void virtio_blk_drained_end(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (s->ioeventfd_started) {
        virtio_blk_ioeventfd_attach(s);
    }
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb     = virtio_blk_resize,
    .drained_begin = virtio_blk_drained_begin,
    .drained_end   = virtio_blk_drained_end,
};
static bool
validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
                                  uint16_t num_queues, Error **errp)
{
    g_autofree unsigned long *vqs = bitmap_new(num_queues);
    g_autoptr(GHashTable) iothreads =
        g_hash_table_new(g_str_hash, g_str_equal);

    for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
        const char *name = node->value->iothread;
        uint16List *vq;

        if (!iothread_by_id(name)) {
            error_setg(errp, "IOThread \"%s\" object does not exist", name);
            return false;
        }

        if (!g_hash_table_add(iothreads, (gpointer)name)) {
            error_setg(errp,
                       "duplicate IOThread name \"%s\" in iothread-vq-mapping",
                       name);
            return false;
        }

        if (node != list) {
            if (!!node->value->vqs != !!list->value->vqs) {
                error_setg(errp, "either all items in iothread-vq-mapping "
                                 "must have vqs or none of them must have it");
                return false;
            }
        }

        for (vq = node->value->vqs; vq; vq = vq->next) {
            if (vq->value >= num_queues) {
                error_setg(errp, "vq index %u for IOThread \"%s\" must be "
                           "less than num_queues %u in iothread-vq-mapping",
                           vq->value, name, num_queues);
                return false;
            }

            if (test_and_set_bit(vq->value, vqs)) {
                error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
                           "because it is already assigned", vq->value, name);
                return false;
            }
        }
    }

    if (list->value->vqs) {
        for (uint16_t i = 0; i < num_queues; i++) {
            if (!test_bit(i, vqs)) {
                error_setg(errp,
                        "missing vq %u IOThread assignment in iothread-vq-mapping",
                        i);
                return false;
            }
        }
    }

    return true;
}
/**
 * apply_iothread_vq_mapping:
 * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
 * @vq_aio_context: The array of AioContext pointers to fill in.
 * @num_queues: The length of @vq_aio_context.
 * @errp: If an error occurs, a pointer to the area to store the error.
 *
 * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
 * the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
 *
 * Returns: %true on success, %false on failure.
 */
static bool apply_iothread_vq_mapping(
        IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
        AioContext **vq_aio_context,
        uint16_t num_queues,
        Error **errp)
{
    IOThreadVirtQueueMappingList *node;
    size_t num_iothreads = 0;
    size_t cur_iothread = 0;

    if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
                                           num_queues, errp)) {
        return false;
    }

    for (node = iothread_vq_mapping_list; node; node = node->next) {
        num_iothreads++;
    }

    for (node = iothread_vq_mapping_list; node; node = node->next) {
        IOThread *iothread = iothread_by_id(node->value->iothread);
        AioContext *ctx = iothread_get_aio_context(iothread);

        /* Released in virtio_blk_vq_aio_context_cleanup() */
        object_ref(OBJECT(iothread));

        if (node->value->vqs) {
            uint16List *vq;

            /* Explicit vq:IOThread assignment */
            for (vq = node->value->vqs; vq; vq = vq->next) {
                assert(vq->value < num_queues);
                vq_aio_context[vq->value] = ctx;
            }
        } else {
            /* Round-robin vq:IOThread assignment */
            for (unsigned i = cur_iothread; i < num_queues;
                 i += num_iothreads) {
                vq_aio_context[i] = ctx;
            }
        }

        cur_iothread++;
    }

    return true;
}
/* Context: BQL held */
static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
{
    ERRP_GUARD();
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOBlkConf *conf = &s->conf;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (conf->iothread && conf->iothread_vq_mapping_list) {
        error_setg(errp,
                   "iothread and iothread-vq-mapping properties cannot be set "
                   "at the same time");
        return false;
    }

    if (conf->iothread || conf->iothread_vq_mapping_list) {
        if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
            error_setg(errp,
                       "device is incompatible with iothread "
                       "(transport does not support notifiers)");
            return false;
        }
        if (!virtio_device_ioeventfd_enabled(vdev)) {
            error_setg(errp, "ioeventfd is required for iothread");
            return false;
        }

        /*
         * If ioeventfd is (re-)enabled while the guest is running there could
         * be block jobs that can conflict.
         */
        if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            error_prepend(errp, "cannot start virtio-blk ioeventfd: ");
            return false;
        }
    }

    s->vq_aio_context = g_new(AioContext *, conf->num_queues);

    if (conf->iothread_vq_mapping_list) {
        if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
                                       s->vq_aio_context,
                                       conf->num_queues,
                                       errp)) {
            g_free(s->vq_aio_context);
            s->vq_aio_context = NULL;
            return false;
        }
    } else if (conf->iothread) {
        AioContext *ctx = iothread_get_aio_context(conf->iothread);
        for (unsigned i = 0; i < conf->num_queues; i++) {
            s->vq_aio_context[i] = ctx;
        }

        /* Released in virtio_blk_vq_aio_context_cleanup() */
        object_ref(OBJECT(conf->iothread));
    } else {
        AioContext *ctx = qemu_get_aio_context();
        for (unsigned i = 0; i < conf->num_queues; i++) {
            s->vq_aio_context[i] = ctx;
        }
    }

    return true;
}
/* Context: BQL held */
static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s)
{
    VirtIOBlkConf *conf = &s->conf;

    assert(!s->ioeventfd_started);

    if (conf->iothread_vq_mapping_list) {
        IOThreadVirtQueueMappingList *node;

        for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
            IOThread *iothread = iothread_by_id(node->value->iothread);
            object_unref(OBJECT(iothread));
        }
    }

    if (conf->iothread) {
        object_unref(OBJECT(conf->iothread));
    }

    g_free(s->vq_aio_context);
    s->vq_aio_context = NULL;
}
/* Context: BQL held */
static int virtio_blk_start_ioeventfd(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf.num_queues;
    Error *local_err = NULL;
    int r;

    if (s->ioeventfd_started || s->ioeventfd_starting) {
        return 0;
    }

    s->ioeventfd_starting = true;

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, nvqs, true);
    if (r != 0) {
        error_report("virtio-blk failed to set guest notifier (%d), "
                     "ensure -accel kvm is set.", r);
        goto fail_guest_notifiers;
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    /* Set up virtqueue notify */
    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
        if (r != 0) {
            int j = i;

            fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
            while (i--) {
                virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
            }

            /*
             * The transaction expects the ioeventfds to be open when it
             * commits. Do it now, before the cleanup loop.
             */
            memory_region_transaction_commit();

            while (j--) {
                virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j);
            }
            goto fail_host_notifiers;
        }
    }

    memory_region_transaction_commit();

    /*
     * Try to change the AioContext so that block jobs and other operations can
     * co-locate their activity in the same AioContext. If it fails, nevermind.
     */
    assert(nvqs > 0); /* enforced during ->realize() */
    r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0],
                            &local_err);
    if (r < 0) {
        warn_report_err(local_err);
    }

    /*
     * These fields must be visible to the IOThread when it processes the
     * virtqueue, otherwise it will think ioeventfd has not started yet.
     *
     * Make sure ->ioeventfd_started is false when blk_set_aio_context() is
     * called above so that draining does not cause the host notifier to be
     * detached/attached prematurely.
     */
    s->ioeventfd_starting = false;
    s->ioeventfd_started = true;
    smp_wmb(); /* paired with aio_notify_accept() on the read side */

    /*
     * Get this show started by hooking up our callbacks. If drained now,
     * virtio_blk_drained_end() will do this later.
     * Attaching the notifier also kicks the virtqueues, processing any requests
     * they may already have.
     */
    if (!blk_in_drain(s->conf.conf.blk)) {
        virtio_blk_ioeventfd_attach(s);
    }
    return 0;

  fail_host_notifiers:
    k->set_guest_notifiers(qbus->parent, nvqs, false);
  fail_guest_notifiers:
    s->ioeventfd_disabled = true;
    s->ioeventfd_starting = false;
    return -ENOSYS;
}
/*
 * Stop notifications for new requests from guest.
 *
 * Context: BH in IOThread
 */
static void virtio_blk_ioeventfd_stop_vq_bh(void *opaque)
{
    VirtQueue *vq = opaque;
    EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);

    virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context());

    /*
     * Test and clear notifier after disabling event, in case poll callback
     * didn't have time to run.
     */
    virtio_queue_host_notifier_read(host_notifier);
}

/* Context: BQL held */
static void virtio_blk_stop_ioeventfd(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BusState *qbus = qdev_get_parent_bus(DEVICE(s));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf.num_queues;

    if (!s->ioeventfd_started || s->ioeventfd_stopping) {
        return;
    }

    /* Better luck next time. */
    if (s->ioeventfd_disabled) {
        s->ioeventfd_disabled = false;
        s->ioeventfd_started = false;
        return;
    }

    s->ioeventfd_stopping = true;

    if (!blk_in_drain(s->conf.conf.blk)) {
        for (i = 0; i < nvqs; i++) {
            VirtQueue *vq = virtio_get_queue(vdev, i);
            AioContext *ctx = s->vq_aio_context[i];

            aio_wait_bh_oneshot(ctx, virtio_blk_ioeventfd_stop_vq_bh, vq);
        }
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }

    /*
     * Set ->ioeventfd_started to false before draining so that host notifiers
     * are not detached/attached anymore.
     */
    s->ioeventfd_started = false;

    /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
    blk_drain(s->conf.conf.blk);

    /*
     * Try to switch bs back to the QEMU main loop. If other users keep the
     * BlockBackend in the iothread, that's ok
     */
    blk_set_aio_context(s->conf.conf.blk, qemu_get_aio_context(), NULL);

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    s->ioeventfd_stopping = false;
}
static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    BlockDriverState *bs;
    Error *err = NULL;
    unsigned i;

    if (!conf->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!blk_is_inserted(conf->conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }
    if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
        conf->num_queues = 1;
    }
    if (!conf->num_queues) {
        error_setg(errp, "num-queues property must be larger than 0");
        return;
    }
    if (conf->queue_size <= 2) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be > 2", conf->queue_size);
        return;
    }
    if (!is_power_of_2(conf->queue_size) ||
        conf->queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be a power of 2 (max %d)",
                   conf->queue_size, VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!blkconf_apply_backend_options(&conf->conf,
                                       !blk_supports_write_perm(conf->conf.blk),
                                       true, errp)) {
        return;
    }
    s->original_wce = blk_enable_write_cache(conf->conf.blk);
    if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
        return;
    }

    if (!blkconf_blocksizes(&conf->conf, errp)) {
        return;
    }

    bs = blk_bs(conf->conf.blk);
    if (bs->bl.zoned != BLK_Z_NONE) {
        virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
        if (bs->bl.zoned == BLK_Z_HM) {
            virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD);
        }
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
        (!conf->max_discard_sectors ||
         conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
                   ", must be between 1 and %d",
                   conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
        (!conf->max_write_zeroes_sectors ||
         conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
                   "), must be between 1 and %d",
                   conf->max_write_zeroes_sectors,
                   (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
                                            s->host_features);
    virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);

    qemu_mutex_init(&s->rq_lock);

    s->blk = conf->conf.blk;
    s->rq = NULL;
    s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;

    for (i = 0; i < conf->num_queues; i++) {
        virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
    }
    qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);

    /* Don't start ioeventfd if transport does not support notifiers. */
    if (!virtio_device_ioeventfd_enabled(vdev)) {
        s->ioeventfd_disabled = true;
    }

    virtio_blk_vq_aio_context_init(s, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        for (i = 0; i < conf->num_queues; i++) {
            virtio_del_queue(vdev, i);
        }
        virtio_cleanup(vdev);
        return;
    }

    /*
     * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
     * called after ->start_ioeventfd() has already set blk's AioContext.
     */
    s->change =
        qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);

    blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
    blk_set_dev_ops(s->blk, &virtio_block_ops, s);

    blk_iostatus_enable(s->blk);

    add_boot_device_lchs(dev, "/disk@0,0",
                         conf->conf.lcyls,
                         conf->conf.lheads,
                         conf->conf.lsecs);
}
*dev
)
2090 VirtIODevice
*vdev
= VIRTIO_DEVICE(dev
);
2091 VirtIOBlock
*s
= VIRTIO_BLK(dev
);
2092 VirtIOBlkConf
*conf
= &s
->conf
;
2096 del_boot_device_lchs(dev
, "/disk@0,0");
2097 virtio_blk_vq_aio_context_cleanup(s
);
2098 for (i
= 0; i
< conf
->num_queues
; i
++) {
2099 virtio_del_queue(vdev
, i
);
2101 qemu_coroutine_dec_pool_size(conf
->num_queues
* conf
->queue_size
/ 2);
2102 qemu_mutex_destroy(&s
->rq_lock
);
2103 blk_ram_registrar_destroy(&s
->blk_ram_registrar
);
2104 qemu_del_vm_change_state_handler(s
->change
);
2105 blockdev_mark_auto_del(s
->blk
);
2106 virtio_cleanup(vdev
);
static void virtio_blk_instance_init(Object *obj)
{
    VirtIOBlock *s = VIRTIO_BLK(obj);

    device_add_bootindex_property(obj, &s->conf.conf.bootindex,
                                  "bootindex", "/disk@0,0",
                                  DEVICE(obj));
}
= {
2119 .name
= "virtio-blk",
2120 .minimum_version_id
= 2,
2122 .fields
= (const VMStateField
[]) {
2123 VMSTATE_VIRTIO_DEVICE
,
2124 VMSTATE_END_OF_LIST()
2128 static Property virtio_blk_properties
[] = {
2129 DEFINE_BLOCK_PROPERTIES(VirtIOBlock
, conf
.conf
),
2130 DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock
, conf
.conf
),
2131 DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock
, conf
.conf
),
2132 DEFINE_PROP_STRING("serial", VirtIOBlock
, conf
.serial
),
2133 DEFINE_PROP_BIT64("config-wce", VirtIOBlock
, host_features
,
2134 VIRTIO_BLK_F_CONFIG_WCE
, true),
2136 DEFINE_PROP_BIT64("scsi", VirtIOBlock
, host_features
,
2137 VIRTIO_BLK_F_SCSI
, false),
2139 DEFINE_PROP_BIT("request-merging", VirtIOBlock
, conf
.request_merging
, 0,
2141 DEFINE_PROP_UINT16("num-queues", VirtIOBlock
, conf
.num_queues
,
2142 VIRTIO_BLK_AUTO_NUM_QUEUES
),
2143 DEFINE_PROP_UINT16("queue-size", VirtIOBlock
, conf
.queue_size
, 256),
2144 DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock
, conf
.seg_max_adjust
, true),
2145 DEFINE_PROP_LINK("iothread", VirtIOBlock
, conf
.iothread
, TYPE_IOTHREAD
,
2147 DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock
,
2148 conf
.iothread_vq_mapping_list
),
2149 DEFINE_PROP_BIT64("discard", VirtIOBlock
, host_features
,
2150 VIRTIO_BLK_F_DISCARD
, true),
2151 DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock
,
2152 conf
.report_discard_granularity
, true),
2153 DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock
, host_features
,
2154 VIRTIO_BLK_F_WRITE_ZEROES
, true),
2155 DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock
,
2156 conf
.max_discard_sectors
, BDRV_REQUEST_MAX_SECTORS
),
2157 DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock
,
2158 conf
.max_write_zeroes_sectors
, BDRV_REQUEST_MAX_SECTORS
),
2159 DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock
,
2160 conf
.x_enable_wce_if_config_wce
, true),
2161 DEFINE_PROP_END_OF_LIST(),
2164 static void virtio_blk_class_init(ObjectClass
*klass
, void *data
)
2166 DeviceClass
*dc
= DEVICE_CLASS(klass
);
2167 VirtioDeviceClass
*vdc
= VIRTIO_DEVICE_CLASS(klass
);
2169 device_class_set_props(dc
, virtio_blk_properties
);
2170 dc
->vmsd
= &vmstate_virtio_blk
;
2171 set_bit(DEVICE_CATEGORY_STORAGE
, dc
->categories
);
2172 vdc
->realize
= virtio_blk_device_realize
;
2173 vdc
->unrealize
= virtio_blk_device_unrealize
;
2174 vdc
->get_config
= virtio_blk_update_config
;
2175 vdc
->set_config
= virtio_blk_set_config
;
2176 vdc
->get_features
= virtio_blk_get_features
;
2177 vdc
->set_status
= virtio_blk_set_status
;
2178 vdc
->reset
= virtio_blk_reset
;
2179 vdc
->save
= virtio_blk_save_device
;
2180 vdc
->load
= virtio_blk_load_device
;
2181 vdc
->start_ioeventfd
= virtio_blk_start_ioeventfd
;
2182 vdc
->stop_ioeventfd
= virtio_blk_stop_ioeventfd
;
static const TypeInfo virtio_blk_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .instance_init = virtio_blk_instance_init,
    .class_init = virtio_blk_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_blk_info);
}

type_init(virtio_register_types)