/*
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "sysemu/blockdev.h"
#include "hw/virtio/virtio-blk.h"
#include "dataplane/virtio-blk.h"
#include "scsi/constants.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                    VirtIOBlockReq *req)
{
    req->dev = s;
    req->vq = vq;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
}
static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    g_free(req);
}
static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(vdev, req, status);

    stb_p(&req->in->status, status);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (s->dataplane_started && !s->dataplane_disabled) {
        virtio_blk_data_plane_notify(s->dataplane, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}
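/*
 * Illustration (editor's note, not part of the original file): the
 * guest-visible layout that virtio_blk_req_complete() finishes.  Per the
 * virtio specification, a block request descriptor chain looks like:
 *
 *     struct virtio_blk_outhdr {      driver-written header, first out segment
 *         __virtio32 type;            VIRTIO_BLK_T_IN/OUT/FLUSH/...
 *         __virtio32 ioprio;
 *         __virtio64 sector;
 *     };
 *     ... data buffers ...            out segments (write) or in segments (read)
 *     struct virtio_blk_inhdr {       device-written footer, last in segment
 *         unsigned char status;       VIRTIO_BLK_S_OK/IOERR/UNSUPP
 *     };
 *
 * stb_p() above stores the status byte into that footer before the element
 * is pushed back onto the used ring.
 */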
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
                                      bool is_read)
{
    BlockErrorAction action = blk_get_error_action(req->dev->blk,
                                                   is_read, error);
    VirtIOBlock *s = req->dev;

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;
        req->next = s->rq;
        s->rq = req;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        block_acct_failed(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
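/*
 * Usage sketch (editor's note): the action returned by
 * blk_get_error_action() follows the drive's werror/rerror policy, e.g.
 *
 *     -drive file=disk.img,if=none,id=drive0,werror=stop,rerror=report
 *
 * With "stop" the failed request is re-queued on s->rq and the VM pauses
 * until it can be retried; with "report" the error is completed back to
 * the guest as VIRTIO_BLK_S_IOERR.
 */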
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure. If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration. While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
        virtio_blk_free_request(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0)) {
            goto out;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
typedef struct {
    VirtIOBlockReq *req;
    struct sg_io_hdr hdr;
} VirtIOBlockIoctlReq;
static void virtio_blk_ioctl_complete(void *opaque, int status)
{
    VirtIOBlockIoctlReq *ioctl_req = opaque;
    VirtIOBlockReq *req = ioctl_req->req;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    struct virtio_scsi_inhdr *scsi;
    struct sg_io_hdr *hdr;

    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        virtio_stl_p(vdev, &scsi->errors, 255);
        goto out;
    }

    hdr = &ioctl_req->hdr;
    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred. However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer."
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    g_free(ioctl_req);
}
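/*
 * Worked example (editor's note): the 32-bit errors field packs the four
 * SG_IO status bytes as
 *
 *     errors = status | (msg_status << 8) |
 *              (host_status << 16) | (driver_status << 24)
 *
 * so a CHECK_CONDITION (0x02) with DRIVER_SENSE (0x08) set would be stored
 * as 0x08000002.  A failed ioctl is signalled with the catch-all value 255.
 */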
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}
static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    VirtQueueElement *elem = &req->elem;
    VirtIOBlock *blk = req->dev;

#ifdef __linux__
    int i;
    VirtIOBlockIoctlReq *ioctl_req;
    BlockAIOCB *acb;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!blk->conf.scsi) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    ioctl_req->req = req;
    ioctl_req->hdr.interface_id = 'S';
    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    ioctl_req->hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
        ioctl_req->hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        ioctl_req->hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
    }

    ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;

    acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
                        virtio_blk_ioctl_complete, ioctl_req);
    if (!acb) {
        g_free(ioctl_req);
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }
    return -EINPROGRESS;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    if (scsi) {
        virtio_stl_p(vdev, &scsi->errors, 255);
    }
    return status;
}
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;

    status = virtio_blk_handle_scsi_req(req);
    if (status != -EINPROGRESS) {
        virtio_blk_req_complete(req, status);
        virtio_blk_free_request(req);
    }
}
static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    bool is_write = mrb->is_write;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from external so we can't
         * modify it here. We need to initialize it locally and then add the
         * external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
        }

        trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
                                         mrb, start, num_reqs,
                                         sector_num << BDRV_SECTOR_BITS,
                                         qiov->size, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (is_write) {
        blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
                        virtio_blk_rw_complete, mrb->reqs[start]);
    } else {
        blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
                       virtio_blk_rw_complete, mrb->reqs[start]);
    }
}
static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}
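/*
 * Worked example (editor's note): sector numbers are 64-bit, so a plain
 * "return req1->sector_num - req2->sector_num;" would be truncated to the
 * int return type.  For instance 0x100000000 - 0 truncates to 0, wrongly
 * reporting the two requests as equal; hence the explicit comparisons.
 */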
static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(blk, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in the following situations:
             * 1. requests are not sequential
             * 2. merge would exceed maximum number of IOVs
             * 3. merge would exceed maximum transfer length of backend device
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                submit_requests(blk, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
            start = i;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(blk, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}
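/*
 * Worked example (editor's note): two queued 4 KiB writes at sectors 0 and
 * 8 are sequential (0 + 4096 / 512 == 8), so they are coalesced into one
 * 8 KiB write at sector 0; a third write at sector 100 breaks the run and
 * starts a new batch, which is submitted separately.
 */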
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    block_acct_start(blk_get_stats(req->dev->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(req->dev->blk, mrb);
    }
    blk_aio_flush(req->dev->blk, virtio_blk_flush_complete, req);
}
static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}
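/*
 * Worked example (editor's note): sector_mask is
 * (logical_block_size / BDRV_SECTOR_SIZE) - 1, so a 512-byte device has
 * mask 0 (any sector is aligned) while a 4096-byte device has mask 7 and
 * rejects requests that do not start on an 8-sector boundary.
 */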
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    uint32_t type;
    struct iovec *in_iov = req->elem.in_sg;
    struct iovec *out_iov = req->elem.out_sg;
    unsigned in_num = req->elem.in_num;
    unsigned out_num = req->elem.out_num;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        virtio_error(vdev, "virtio-blk missing headers");
        return -1;
    }

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        virtio_error(vdev, "virtio-blk request outhdr too short");
        return -1;
    }

    iov_discard_front(&out_iov, &out_num, sizeof(req->out));

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        virtio_error(vdev, "virtio-blk request inhdr too short");
        return -1;
    }

    /* We always touch the last byte, so just see how big in_iov is. */
    req->in_len = iov_size(in_iov, in_num);
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));

    type = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);

    /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
     * is an optional flag. Although a guest should not send this flag if
     * not negotiated we ignored it in the past. So keep ignoring it. */
    switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    case VIRTIO_BLK_T_IN:
    {
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = virtio_ldq_p(VIRTIO_DEVICE(req->dev),
                                       &req->out.sector);

        if (is_write) {
            qemu_iovec_init_external(&req->qiov, out_iov, out_num);
            trace_virtio_blk_handle_write(vdev, req, req->sector_num,
                                          req->qiov.size / BDRV_SECTOR_SIZE);
        } else {
            qemu_iovec_init_external(&req->qiov, in_iov, in_num);
            trace_virtio_blk_handle_read(vdev, req, req->sector_num,
                                         req->qiov.size / BDRV_SECTOR_SIZE);
        }

        if (!virtio_blk_sect_range_ok(req->dev, req->sector_num,
                                      req->qiov.size)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
            block_acct_invalid(blk_get_stats(req->dev->blk),
                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
            virtio_blk_free_request(req);
            return 0;
        }

        block_acct_start(blk_get_stats(req->dev->blk),
                         &req->acct, req->qiov.size,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        /* merge would exceed maximum number of requests or IO direction
         * changes if requests are queued */
        if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                                  is_write != mrb->is_write ||
                                  !req->dev->conf.request_merging)) {
            virtio_blk_submit_multireq(req->dev->blk, mrb);
        }

        assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
        mrb->reqs[mrb->num_reqs++] = req;
        mrb->is_write = is_write;
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        virtio_blk_handle_flush(req, mrb);
        break;
    case VIRTIO_BLK_T_SCSI_CMD:
        virtio_blk_handle_scsi(req);
        break;
    case VIRTIO_BLK_T_GET_ID:
    {
        VirtIOBlock *s = req->dev;

        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        const char *serial = s->conf.serial ? s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        virtio_blk_free_request(req);
        break;
    }
    default:
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        virtio_blk_free_request(req);
    }
    return 0;
}
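/*
 * Example (editor's note): with "-device virtio-blk-pci,serial=abc123,..."
 * a VIRTIO_BLK_T_GET_ID request returns "abc123\0" (NUL-terminated because
 * it is shorter than the VIRTIO_BLK_ID_BYTES buffer); a serial that fills
 * the buffer is copied without the terminator, per the convention above.
 */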
bool virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};
    bool progress = false;

    aio_context_acquire(blk_get_aio_context(s->blk));
    blk_io_plug(s->blk);

    do {
        virtio_queue_set_notification(vq, 0);

        while ((req = virtio_blk_get_request(s, vq))) {
            progress = true;
            if (virtio_blk_handle_request(req, &mrb)) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                break;
            }
        }

        virtio_queue_set_notification(vq, 1);
    } while (!virtio_queue_empty(vq));

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s->blk, &mrb);
    }

    blk_io_unplug(s->blk);
    aio_context_release(blk_get_aio_context(s->blk));
    return progress;
}
static void virtio_blk_handle_output_do(VirtIOBlock *s, VirtQueue *vq)
{
    virtio_blk_handle_vq(s, vq);
}
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    if (s->dataplane && !s->dataplane_started) {
        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
         * dataplane here instead of waiting for .set_status().
         */
        virtio_device_start_ioeventfd(vdev);
        if (!s->dataplane_disabled) {
            return;
        }
    }
    virtio_blk_handle_output_do(s, vq);
}
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {};

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    while (req) {
        VirtIOBlockReq *next = req->next;
        if (virtio_blk_handle_request(req, &mrb)) {
            /* Device is now broken and won't do any processing until it gets
             * reset. Already queued requests will be lost: let's purge them.
             */
            while (req) {
                next = req->next;
                virtqueue_detach_element(req->vq, &req->elem, 0);
                virtio_blk_free_request(req);
                req = next;
            }
            break;
        }
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s->blk, &mrb);
    }
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    if (!s->bh) {
        s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk),
                           virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}
static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    AioContext *ctx;
    VirtIOBlockReq *req;

    ctx = blk_get_aio_context(s->blk);
    aio_context_acquire(ctx);
    blk_drain(s->blk);

    /* We drop queued requests after blk_drain() because blk_drain() itself can
     * produce them. */
    while (s->rq) {
        req = s->rq;
        s->rq = req->next;
        virtqueue_detach_element(req->vq, &req->elem, 0);
        virtio_blk_free_request(req);
    }

    aio_context_release(ctx);

    assert(!s->dataplane_started);
    blk_set_enable_write_cache(s->blk, s->original_wce);
}
/* coalesce internal state, copy to pci i/o region 0 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int64_t length;
    int blk_size = conf->logical_block_size;

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    virtio_stl_p(vdev, &blkcfg.seg_max, 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stw_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    length = blk_getlength(s->blk);
    if (length > 0 && length / conf->heads / conf->secs % blk_size) {
        blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    } else {
        blkcfg.geometry.sectors = conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = blk_enable_write_cache(s->blk);
    virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
    memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
}
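/*
 * Worked example (editor's note): for a 4 KiB-block DASD-style disk with
 * secs = 12, sector_mask is (4096 / 512) - 1 == 7, so 12 & ~7 == 8 sectors
 * per track is advertised whenever length / heads / secs is not a multiple
 * of blk_size; otherwise the configured value 12 is kept unchanged.
 */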
static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, sizeof(blkcfg));

    aio_context_acquire(blk_get_aio_context(s->blk));
    blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
    aio_context_release(blk_get_aio_context(s->blk));
}
static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
        if (s->conf.scsi) {
            error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
            return 0;
        }
    } else {
        virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
        virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
    }

    if (s->conf.config_wce) {
        virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
    }
    if (blk_enable_write_cache(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
    }
    if (blk_is_read_only(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    }
    if (s->conf.num_queues > 1) {
        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
    }

    return features;
}
static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
        assert(!s->dataplane_started);
    }

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes.  Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->blk would erroneously be placed in writethrough mode.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
        aio_context_acquire(blk_get_aio_context(s->blk));
        blk_set_enable_write_cache(s->blk,
                                   virtio_vdev_has_feature(vdev,
                                                           VIRTIO_BLK_F_WCE));
        aio_context_release(blk_get_aio_context(s->blk));
    }
}
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req = s->rq;

    while (req) {
        qemu_put_sbyte(f, 1);

        if (s->conf.num_queues > 1) {
            qemu_put_be32(f, virtio_get_queue_index(req->vq));
        }

        qemu_put_virtqueue_element(f, &req->elem);
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}
static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    while (qemu_get_sbyte(f)) {
        unsigned nvqs = s->conf.num_queues;
        unsigned vq_idx = 0;
        VirtIOBlockReq *req;

        if (nvqs > 1) {
            vq_idx = qemu_get_be32(f);

            if (vq_idx >= nvqs) {
                error_report("Invalid virtqueue index in request list: %#x",
                             vq_idx);
                return -EINVAL;
            }
        }

        req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
        virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
        req->next = s->rq;
        s->rq = req;
    }

    return 0;
}
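/*
 * Wire-format sketch (editor's note): virtio_blk_save_device() and this
 * loader agree on a simple stream for in-flight requests:
 *
 *     [sbyte 1] [be32 vq index, only if num_queues > 1] [virtqueue element]
 *     ... repeated per request ...
 *     [sbyte 0]                        end-of-list marker
 *
 * Requests are prepended to s->rq on load, and virtio_blk_dma_restart_bh()
 * resubmits them once the VM resumes.
 */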
static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    virtio_notify_config(vdev);
}
static const BlockDevOps virtio_block_ops = {
    .resize_cb = virtio_blk_resize,
};
static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    Error *err = NULL;
    unsigned i;

    if (!conf->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!blk_is_inserted(conf->conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }
    if (!conf->num_queues) {
        error_setg(errp, "num-queues property must be larger than 0");
        return;
    }
    if (!is_power_of_2(conf->queue_size) ||
        conf->queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be a power of 2 (max %d)",
                   conf->queue_size, VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!blkconf_apply_backend_options(&conf->conf,
                                       blk_is_read_only(conf->conf.blk), true,
                                       errp)) {
        return;
    }
    s->original_wce = blk_enable_write_cache(conf->conf.blk);
    if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
        return;
    }

    blkconf_blocksizes(&conf->conf);

    if (conf->conf.logical_block_size >
        conf->conf.physical_block_size) {
        error_setg(errp,
                   "logical_block_size > physical_block_size not supported");
        return;
    }

    virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
                sizeof(struct virtio_blk_config));

    s->blk = conf->conf.blk;
    s->rq = NULL;
    s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;

    for (i = 0; i < conf->num_queues; i++) {
        virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
    }
    virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        virtio_cleanup(vdev);
        return;
    }

    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    blk_set_dev_ops(s->blk, &virtio_block_ops, s);
    blk_set_guest_block_size(s->blk, s->conf.conf.logical_block_size);

    blk_iostatus_enable(s->blk);
}
static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);

    virtio_blk_data_plane_destroy(s->dataplane);
    s->dataplane = NULL;
    qemu_del_vm_change_state_handler(s->change);
    blockdev_mark_auto_del(s->blk);
    virtio_cleanup(vdev);
}
static void virtio_blk_instance_init(Object *obj)
{
    VirtIOBlock *s = VIRTIO_BLK(obj);

    device_add_bootindex_property(obj, &s->conf.conf.bootindex,
                                  "bootindex", "/disk@0,0",
                                  DEVICE(obj), NULL);
}
static const VMStateDescription vmstate_virtio_blk = {
    .name = "virtio-blk",
    .minimum_version_id = 2,
    .version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
static Property virtio_blk_properties[] = {
    DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
    DEFINE_PROP_BIT("config-wce", VirtIOBlock, conf.config_wce, 0, true),
#ifdef __linux__
    DEFINE_PROP_BIT("scsi", VirtIOBlock, conf.scsi, 0, false),
#endif
    DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
                    true),
    DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1),
    DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 128),
    DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
                     IOThread *),
    DEFINE_PROP_END_OF_LIST(),
};
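/*
 * Usage example (editor's note): these qdev properties map directly to the
 * command line, e.g.
 *
 *     -device virtio-blk-pci,drive=drive0,serial=abc123,num-queues=4,
 *             queue-size=256,request-merging=off,iothread=iothread0
 *
 * (virtio-blk-pci is the usual PCI proxy around TYPE_VIRTIO_BLK.)
 */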
static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_blk_properties;
    dc->vmsd = &vmstate_virtio_blk;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_blk_device_realize;
    vdc->unrealize = virtio_blk_device_unrealize;
    vdc->get_config = virtio_blk_update_config;
    vdc->set_config = virtio_blk_set_config;
    vdc->get_features = virtio_blk_get_features;
    vdc->set_status = virtio_blk_set_status;
    vdc->reset = virtio_blk_reset;
    vdc->save = virtio_blk_save_device;
    vdc->load = virtio_blk_load_device;
    vdc->start_ioeventfd = virtio_blk_data_plane_start;
    vdc->stop_ioeventfd = virtio_blk_data_plane_stop;
}
static const TypeInfo virtio_blk_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .instance_init = virtio_blk_instance_init,
    .class_init = virtio_blk_class_init,
};
static void virtio_register_types(void)
{
    type_register_static(&virtio_blk_info);
}

type_init(virtio_register_types)