/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/block/block.h"
#include "sysemu/blockdev.h"
#include "hw/virtio/virtio-blk.h"
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
# include "dataplane/virtio-blk.h"
# include "migration/migration.h"
#endif
#include "block/scsi.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = g_slice_new0(VirtIOBlockReq);
    req->dev = s;
    req->elem = g_slice_new0(VirtQueueElement);
    return req;
}

static void virtio_blk_free_request(VirtIOBlockReq *req)
{
    if (req) {
        g_slice_free(VirtQueueElement, req->elem);
        g_slice_free(VirtIOBlockReq, req);
    }
}
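
/*
 * Complete a request: write the status byte into the guest-visible
 * in-header, push the element back onto the virtqueue and notify the guest.
 */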
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(req, status);

    stb_p(&req->in->status, status);
    virtqueue_push(s->vq, req->elem, req->qiov.size + sizeof(*req->in));
    virtio_notify(vdev, s->vq);
}
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
    bool is_read)
{
    BlockErrorAction action = bdrv_get_error_action(req->dev->bs, is_read, error);
    VirtIOBlock *s = req->dev;

    if (action == BLOCK_ERROR_ACTION_STOP) {
        req->next = s->rq;
        s->rq = req;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        bdrv_acct_done(s->bs, &req->acct);
        virtio_blk_free_request(req);
    }

    bdrv_error_action(s->bs, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
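
/*
 * Completion callback for reads and writes submitted to the block layer.
 * On error the request is either reported to the guest, ignored, or parked
 * on s->rq to be retried once the VM resumes, depending on the configured
 * rerror/werror policy.
 */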
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    trace_virtio_blk_rw_complete(req, ret);

    if (ret) {
        bool is_read = !(ldl_p(&req->out->type) & VIRTIO_BLK_T_OUT);
        if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
            return;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    bdrv_acct_done(req->dev->bs, &req->acct);
    virtio_blk_free_request(req);
}
static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0)) {
            return;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    bdrv_acct_done(req->dev->bs, &req->acct);
    virtio_blk_free_request(req);
}
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (!virtqueue_pop(s->vq, req->elem)) {
        virtio_blk_free_request(req);
        return NULL;
    }

    return req;
}
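
/*
 * Execute a SCSI passthrough (SG_IO) request carried in a virtqueue element
 * and fill in the virtio_scsi_inhdr.  Returns a VIRTIO_BLK_S_* status code.
 */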
int virtio_blk_handle_scsi_req(VirtIOBlock *blk,
                               VirtQueueElement *elem)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
#ifdef __linux__
    int i;
    struct sg_io_hdr hdr;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!blk->blk.scsi) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    memset(&hdr, 0, sizeof(struct sg_io_hdr));
    hdr.interface_id = 'S';
    hdr.cmd_len = elem->out_sg[1].iov_len;
    hdr.cmdp = elem->out_sg[1].iov_base;
    hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        hdr.dxfer_direction = SG_DXFER_TO_DEV;
        hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < hdr.iovec_count; i++) {
            hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < hdr.iovec_count; i++) {
            hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        hdr.dxfer_direction = SG_DXFER_NONE;
    }

    hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;

    status = bdrv_ioctl(blk->bs, SG_IO, &hdr);
    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred.  However they do set DRIVER_SENSE in driver_status
     * field.  Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr.status == 0 && hdr.sb_len_wr > 0) {
        hdr.status = CHECK_CONDITION;
    }

    stl_p(&scsi->errors,
          hdr.status | (hdr.msg_status << 8) |
          (hdr.host_status << 16) | (hdr.driver_status << 24));
    stl_p(&scsi->residual, hdr.resid);
    stl_p(&scsi->sense_len, hdr.sb_len_wr);
    stl_p(&scsi->data_len, hdr.dxfer_len);

    return status;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    if (scsi) {
        stl_p(&scsi->errors, 255);
    }
    return status;
}
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;

    status = virtio_blk_handle_scsi_req(req->dev, req->elem);
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
}
typedef struct MultiReqBuffer {
    BlockRequest        blkreq[32];
    unsigned int        num_writes;
} MultiReqBuffer;
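
/*
 * Submit all writes collected in mrb to the block layer as a single
 * multiwrite; requests that fail are completed immediately with -EIO.
 */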
static void virtio_submit_multiwrite(BlockDriverState *bs, MultiReqBuffer *mrb)
{
    int i, ret;

    if (!mrb->num_writes) {
        return;
    }

    ret = bdrv_aio_multiwrite(bs, mrb->blkreq, mrb->num_writes);
    if (ret != 0) {
        for (i = 0; i < mrb->num_writes; i++) {
            if (mrb->blkreq[i].error) {
                virtio_blk_rw_complete(mrb->blkreq[i].opaque, -EIO);
            }
        }
    }

    mrb->num_writes = 0;
}
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    virtio_submit_multiwrite(req->dev->bs, mrb);
    bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
}
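
/*
 * Queue a guest write into the multiwrite buffer; the batch is flushed to
 * the block layer when it fills up or when the virtqueue drains.
 */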
static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    BlockRequest *blkreq;
    uint64_t sector;

    sector = ldq_p(&req->out->sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE);

    trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    if (mrb->num_writes == 32) {
        virtio_submit_multiwrite(req->dev->bs, mrb);
    }

    blkreq = &mrb->blkreq[mrb->num_writes];
    blkreq->sector = sector;
    blkreq->nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE;
    blkreq->qiov = &req->qiov;
    blkreq->cb = virtio_blk_rw_complete;
    blkreq->opaque = req;
    blkreq->error = 0;

    mrb->num_writes++;
}
static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    uint64_t sector;

    sector = ldq_p(&req->out->sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);

    trace_virtio_blk_handle_read(req, sector, req->qiov.size / 512);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
                   req->qiov.size / BDRV_SECTOR_SIZE,
                   virtio_blk_rw_complete, req);
}
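
/*
 * Decode a single request: validate the out/in headers, then dispatch on
 * the request type (flush, SCSI passthrough, get-id, write or read).
 */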
static void virtio_blk_handle_request(VirtIOBlockReq *req,
                                      MultiReqBuffer *mrb)
{
    uint32_t type;

    if (req->elem->out_num < 1 || req->elem->in_num < 1) {
        error_report("virtio-blk missing headers");
        exit(1);
    }

    if (req->elem->out_sg[0].iov_len < sizeof(*req->out) ||
        req->elem->in_sg[req->elem->in_num - 1].iov_len < sizeof(*req->in)) {
        error_report("virtio-blk header not in correct element");
        exit(1);
    }

    req->out = (void *)req->elem->out_sg[0].iov_base;
    req->in = (void *)req->elem->in_sg[req->elem->in_num - 1].iov_base;

    type = ldl_p(&req->out->type);

    if (type & VIRTIO_BLK_T_FLUSH) {
        virtio_blk_handle_flush(req, mrb);
    } else if (type & VIRTIO_BLK_T_SCSI_CMD) {
        virtio_blk_handle_scsi(req);
    } else if (type & VIRTIO_BLK_T_GET_ID) {
        VirtIOBlock *s = req->dev;

        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        strncpy(req->elem->in_sg[0].iov_base,
                s->blk.serial ? s->blk.serial : "",
                MIN(req->elem->in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        virtio_blk_free_request(req);
    } else if (type & VIRTIO_BLK_T_OUT) {
        qemu_iovec_init_external(&req->qiov, &req->elem->out_sg[1],
                                 req->elem->out_num - 1);
        virtio_blk_handle_write(req, mrb);
    } else if (type == VIRTIO_BLK_T_IN || type == VIRTIO_BLK_T_BARRIER) {
        /* VIRTIO_BLK_T_IN is 0, so we can't just & it. */
        qemu_iovec_init_external(&req->qiov, &req->elem->in_sg[0],
                                 req->elem->in_num - 1);
        virtio_blk_handle_read(req);
    } else {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        virtio_blk_free_request(req);
    }
}
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
     * dataplane here instead of waiting for .set_status().
     */
    if (s->dataplane) {
        virtio_blk_data_plane_start(s->dataplane);
        return;
    }
#endif

    while ((req = virtio_blk_get_request(s))) {
        virtio_blk_handle_request(req, &mrb);
    }

    virtio_submit_multiwrite(s->bs, &mrb);

    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}
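
/*
 * Bottom half scheduled when the VM (re)starts running: requests parked on
 * s->rq after an I/O error (rerror/werror=stop) or restored from migration
 * are resubmitted here.
 */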
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    while (req) {
        VirtIOBlockReq *next = req->next;
        virtio_blk_handle_request(req, &mrb);
        req = next;
    }

    virtio_submit_multiwrite(s->bs, &mrb);
}
static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    if (!s->bh) {
        s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}
static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    if (s->dataplane) {
        virtio_blk_data_plane_stop(s->dataplane);
    }
#endif

    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.
     */
    bdrv_drain_all();
    bdrv_set_enable_write_cache(s->bs, s->original_wce);
}
/* coalesce internal state, copy to pci i/o region 0 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int blk_size = s->conf->logical_block_size;

    bdrv_get_geometry(s->bs, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    stq_p(&blkcfg.capacity, capacity);
    stl_p(&blkcfg.seg_max, 128 - 2);
    stw_p(&blkcfg.cylinders, s->conf->cyls);
    stl_p(&blkcfg.blk_size, blk_size);
    stw_p(&blkcfg.min_io_size, s->conf->min_io_size / blk_size);
    stw_p(&blkcfg.opt_io_size, s->conf->opt_io_size / blk_size);
    blkcfg.heads = s->conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size.  If that is not the case, let's use
     * sector_mask to adopt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value.  Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    if (bdrv_getlength(s->bs) / s->conf->heads / s->conf->secs % blk_size) {
        blkcfg.sectors = s->conf->secs & ~s->sector_mask;
    } else {
        blkcfg.sectors = s->conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(s->conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = bdrv_enable_write_cache(s->bs);
    memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
}
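
/*
 * Apply a guest write to the config space: only the write-cache enable
 * (wce) field is acted upon, under the BlockDriverState's AioContext.
 */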
static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, sizeof(blkcfg));

    aio_context_acquire(bdrv_get_aio_context(s->bs));
    bdrv_set_enable_write_cache(s->bs, blkcfg.wce != 0);
    aio_context_release(bdrv_get_aio_context(s->bs));
}
static uint32_t virtio_blk_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    features |= (1 << VIRTIO_BLK_F_SEG_MAX);
    features |= (1 << VIRTIO_BLK_F_GEOMETRY);
    features |= (1 << VIRTIO_BLK_F_TOPOLOGY);
    features |= (1 << VIRTIO_BLK_F_BLK_SIZE);
    features |= (1 << VIRTIO_BLK_F_SCSI);

    if (s->blk.config_wce) {
        features |= (1 << VIRTIO_BLK_F_CONFIG_WCE);
    }
    if (bdrv_enable_write_cache(s->bs)) {
        features |= (1 << VIRTIO_BLK_F_WCE);
    }
    if (bdrv_is_read_only(s->bs)) {
        features |= 1 << VIRTIO_BLK_F_RO;
    }

    return features;
}
static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    uint32_t features;

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
                                    VIRTIO_CONFIG_S_DRIVER_OK))) {
        virtio_blk_data_plane_stop(s->dataplane);
    }
#endif

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    features = vdev->guest_features;

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes.  Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->bs would erroneously be placed in writethrough mode.
     */
    if (!(features & (1 << VIRTIO_BLK_F_CONFIG_WCE))) {
        aio_context_acquire(bdrv_get_aio_context(s->bs));
        bdrv_set_enable_write_cache(s->bs,
                                    !!(features & (1 << VIRTIO_BLK_F_WCE)));
        aio_context_release(bdrv_get_aio_context(s->bs));
    }
}
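
/*
 * Save device state: the common virtio state followed by the list of
 * in-flight requests, each prefixed by a 1 marker byte and terminated by 0.
 */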
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOBlockReq *req = s->rq;

    virtio_save(vdev, f);

    while (req) {
        qemu_put_sbyte(f, 1);
        qemu_put_buffer(f, (unsigned char *)req->elem,
                        sizeof(VirtQueueElement));
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}
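
/*
 * Restore saved requests onto s->rq and remap their scatter-gather lists
 * into guest memory; they are resubmitted by the dma-restart handler once
 * the VM starts running.
 */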
static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBlock *s = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    int ret;

    if (version_id != 2) {
        return -EINVAL;
    }

    ret = virtio_load(vdev, f);
    if (ret) {
        return ret;
    }

    while (qemu_get_sbyte(f)) {
        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
        qemu_get_buffer(f, (unsigned char *)req->elem,
                        sizeof(VirtQueueElement));
        req->next = s->rq;
        s->rq = req;

        virtqueue_map_sg(req->elem->in_sg, req->elem->in_addr,
                         req->elem->in_num, 1);
        virtqueue_map_sg(req->elem->out_sg, req->elem->out_addr,
                         req->elem->out_num, 0);
    }

    return 0;
}
static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    virtio_notify_config(vdev);
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb = virtio_blk_resize,
};
void virtio_blk_set_conf(DeviceState *dev, VirtIOBlkConf *blk)
{
    VirtIOBlock *s = VIRTIO_BLK(dev);

    memcpy(&(s->blk), blk, sizeof(struct VirtIOBlkConf));
}
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
/* Disable dataplane thread during live migration since it does not
 * update the dirty memory bitmap yet.
 */
static void virtio_blk_migration_state_changed(Notifier *notifier, void *data)
{
    VirtIOBlock *s = container_of(notifier, VirtIOBlock,
                                  migration_state_notifier);
    MigrationState *mig = data;
    Error *err = NULL;

    if (migration_in_setup(mig)) {
        if (!s->dataplane) {
            return;
        }
        virtio_blk_data_plane_destroy(s->dataplane);
        s->dataplane = NULL;
    } else if (migration_has_finished(mig) ||
               migration_has_failed(mig)) {
        if (s->dataplane) {
            return;
        }
        bdrv_drain_all(); /* complete in-flight non-dataplane requests */
        virtio_blk_data_plane_create(VIRTIO_DEVICE(s), &s->blk,
                                     &s->dataplane, &err);
        if (err != NULL) {
            error_report("%s", error_get_pretty(err));
            error_free(err);
        }
    }
}
#endif /* CONFIG_VIRTIO_BLK_DATA_PLANE */
static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *blk = &(s->blk);
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    Error *err = NULL;
#endif
    static int virtio_blk_id;

    if (!blk->conf.bs) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!bdrv_is_inserted(blk->conf.bs)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    blkconf_serial(&blk->conf, &blk->serial);
    s->original_wce = bdrv_enable_write_cache(blk->conf.bs);
    if (blkconf_geometry(&blk->conf, NULL, 65535, 255, 255) < 0) {
        error_setg(errp, "Error setting geometry");
        return;
    }

    virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
                sizeof(struct virtio_blk_config));

    s->bs = blk->conf.bs;
    s->conf = &blk->conf;
    s->rq = NULL;
    s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1;

    s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    virtio_blk_data_plane_create(vdev, blk, &s->dataplane, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        virtio_cleanup(vdev);
        return;
    }
    s->migration_state_notifier.notify = virtio_blk_migration_state_changed;
    add_migration_state_change_notifier(&s->migration_state_notifier);
#endif

    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    register_savevm(dev, "virtio-blk", virtio_blk_id++, 2,
                    virtio_blk_save, virtio_blk_load, s);
    bdrv_set_dev_ops(s->bs, &virtio_block_ops, s);
    bdrv_set_guest_block_size(s->bs, s->conf->logical_block_size);

    bdrv_iostatus_enable(s->bs);

    add_boot_device_path(s->conf->bootindex, dev, "/disk@0,0");
}
static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    remove_migration_state_change_notifier(&s->migration_state_notifier);
    virtio_blk_data_plane_destroy(s->dataplane);
    s->dataplane = NULL;
#endif
    qemu_del_vm_change_state_handler(s->change);
    unregister_savevm(dev, "virtio-blk", s);
    blockdev_mark_auto_del(s->bs);
    virtio_cleanup(vdev);
}
static Property virtio_blk_properties[] = {
    DEFINE_VIRTIO_BLK_PROPERTIES(VirtIOBlock, blk),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_blk_properties;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_blk_device_realize;
    vdc->unrealize = virtio_blk_device_unrealize;
    vdc->get_config = virtio_blk_update_config;
    vdc->set_config = virtio_blk_set_config;
    vdc->get_features = virtio_blk_get_features;
    vdc->set_status = virtio_blk_set_status;
    vdc->reset = virtio_blk_reset;
}
static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .class_init = virtio_blk_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)