/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "trace.h"
/* QOM macros */
/* virtio-mmio-bus */

#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
#define VIRTIO_MMIO_BUS(obj) \
        OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)

/* virtio-mmio */

#define TYPE_VIRTIO_MMIO "virtio-mmio"
#define VIRTIO_MMIO(obj) \
        OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)

/* Magic values exposed through the MAGIC/VERSION/VENDOR registers */
#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 2
#define VIRT_VERSION_LEGACY 1
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
57 typedef struct VirtIOMMIOQueue
{
67 SysBusDevice parent_obj
;
71 /* Guest accessible state needing migration and reset */
72 uint32_t host_features_sel
;
73 uint32_t guest_features_sel
;
74 uint32_t guest_page_shift
;
77 bool format_transport_address
;
78 /* Fields only used for non-legacy (v2) devices */
79 uint32_t guest_features
[2];
80 VirtIOMMIOQueue vqs
[VIRTIO_QUEUE_MAX
];
83 static bool virtio_mmio_ioeventfd_enabled(DeviceState
*d
)
85 return kvm_eventfds_enabled();
88 static int virtio_mmio_ioeventfd_assign(DeviceState
*d
,
89 EventNotifier
*notifier
,
92 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
95 memory_region_add_eventfd(&proxy
->iomem
, VIRTIO_MMIO_QUEUE_NOTIFY
, 4,
98 memory_region_del_eventfd(&proxy
->iomem
, VIRTIO_MMIO_QUEUE_NOTIFY
, 4,
104 static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy
*proxy
)
106 virtio_bus_start_ioeventfd(&proxy
->bus
);
109 static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy
*proxy
)
111 virtio_bus_stop_ioeventfd(&proxy
->bus
);
114 static uint64_t virtio_mmio_read(void *opaque
, hwaddr offset
, unsigned size
)
116 VirtIOMMIOProxy
*proxy
= (VirtIOMMIOProxy
*)opaque
;
117 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
119 trace_virtio_mmio_read(offset
);
122 /* If no backend is present, we treat most registers as
123 * read-as-zero, except for the magic number, version and
124 * vendor ID. This is not strictly sanctioned by the virtio
125 * spec, but it allows us to provide transports with no backend
126 * plugged in which don't confuse Linux's virtio code: the
127 * probe won't complain about the bad magic number, but the
128 * device ID of zero means no backend will claim it.
131 case VIRTIO_MMIO_MAGIC_VALUE
:
133 case VIRTIO_MMIO_VERSION
:
135 return VIRT_VERSION_LEGACY
;
139 case VIRTIO_MMIO_VENDOR_ID
:
146 if (offset
>= VIRTIO_MMIO_CONFIG
) {
147 offset
-= VIRTIO_MMIO_CONFIG
;
150 return virtio_config_readb(vdev
, offset
);
152 return virtio_config_readw(vdev
, offset
);
154 return virtio_config_readl(vdev
, offset
);
160 qemu_log_mask(LOG_GUEST_ERROR
,
161 "%s: wrong size access to register!\n",
166 case VIRTIO_MMIO_MAGIC_VALUE
:
168 case VIRTIO_MMIO_VERSION
:
170 return VIRT_VERSION_LEGACY
;
174 case VIRTIO_MMIO_DEVICE_ID
:
175 return vdev
->device_id
;
176 case VIRTIO_MMIO_VENDOR_ID
:
178 case VIRTIO_MMIO_DEVICE_FEATURES
:
180 if (proxy
->host_features_sel
) {
183 return vdev
->host_features
;
186 VirtioDeviceClass
*vdc
= VIRTIO_DEVICE_GET_CLASS(vdev
);
187 return (vdev
->host_features
& ~vdc
->legacy_features
)
188 >> (32 * proxy
->host_features_sel
);
190 case VIRTIO_MMIO_QUEUE_NUM_MAX
:
191 if (!virtio_queue_get_num(vdev
, vdev
->queue_sel
)) {
194 return VIRTQUEUE_MAX_SIZE
;
195 case VIRTIO_MMIO_QUEUE_PFN
:
196 if (!proxy
->legacy
) {
197 qemu_log_mask(LOG_GUEST_ERROR
,
198 "%s: read from legacy register (0x%"
199 HWADDR_PRIx
") in non-legacy mode\n",
203 return virtio_queue_get_addr(vdev
, vdev
->queue_sel
)
204 >> proxy
->guest_page_shift
;
205 case VIRTIO_MMIO_QUEUE_READY
:
207 qemu_log_mask(LOG_GUEST_ERROR
,
208 "%s: read from non-legacy register (0x%"
209 HWADDR_PRIx
") in legacy mode\n",
213 return proxy
->vqs
[vdev
->queue_sel
].enabled
;
214 case VIRTIO_MMIO_INTERRUPT_STATUS
:
215 return atomic_read(&vdev
->isr
);
216 case VIRTIO_MMIO_STATUS
:
218 case VIRTIO_MMIO_CONFIG_GENERATION
:
220 qemu_log_mask(LOG_GUEST_ERROR
,
221 "%s: read from non-legacy register (0x%"
222 HWADDR_PRIx
") in legacy mode\n",
226 return vdev
->generation
;
227 case VIRTIO_MMIO_DEVICE_FEATURES_SEL
:
228 case VIRTIO_MMIO_DRIVER_FEATURES
:
229 case VIRTIO_MMIO_DRIVER_FEATURES_SEL
:
230 case VIRTIO_MMIO_GUEST_PAGE_SIZE
:
231 case VIRTIO_MMIO_QUEUE_SEL
:
232 case VIRTIO_MMIO_QUEUE_NUM
:
233 case VIRTIO_MMIO_QUEUE_ALIGN
:
234 case VIRTIO_MMIO_QUEUE_NOTIFY
:
235 case VIRTIO_MMIO_INTERRUPT_ACK
:
236 case VIRTIO_MMIO_QUEUE_DESC_LOW
:
237 case VIRTIO_MMIO_QUEUE_DESC_HIGH
:
238 case VIRTIO_MMIO_QUEUE_AVAIL_LOW
:
239 case VIRTIO_MMIO_QUEUE_AVAIL_HIGH
:
240 case VIRTIO_MMIO_QUEUE_USED_LOW
:
241 case VIRTIO_MMIO_QUEUE_USED_HIGH
:
242 qemu_log_mask(LOG_GUEST_ERROR
,
243 "%s: read of write-only register (0x%" HWADDR_PRIx
")\n",
247 qemu_log_mask(LOG_GUEST_ERROR
,
248 "%s: bad register offset (0x%" HWADDR_PRIx
")\n",
255 static void virtio_mmio_write(void *opaque
, hwaddr offset
, uint64_t value
,
258 VirtIOMMIOProxy
*proxy
= (VirtIOMMIOProxy
*)opaque
;
259 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
261 trace_virtio_mmio_write_offset(offset
, value
);
264 /* If no backend is present, we just make all registers
265 * write-ignored. This allows us to provide transports with
266 * no backend plugged in.
271 if (offset
>= VIRTIO_MMIO_CONFIG
) {
272 offset
-= VIRTIO_MMIO_CONFIG
;
275 virtio_config_writeb(vdev
, offset
, value
);
278 virtio_config_writew(vdev
, offset
, value
);
281 virtio_config_writel(vdev
, offset
, value
);
289 qemu_log_mask(LOG_GUEST_ERROR
,
290 "%s: wrong size access to register!\n",
295 case VIRTIO_MMIO_DEVICE_FEATURES_SEL
:
297 proxy
->host_features_sel
= 1;
299 proxy
->host_features_sel
= 0;
302 case VIRTIO_MMIO_DRIVER_FEATURES
:
304 if (proxy
->guest_features_sel
) {
305 qemu_log_mask(LOG_GUEST_ERROR
,
306 "%s: attempt to write guest features with "
307 "guest_features_sel > 0 in legacy mode\n",
310 virtio_set_features(vdev
, value
);
313 proxy
->guest_features
[proxy
->guest_features_sel
] = value
;
316 case VIRTIO_MMIO_DRIVER_FEATURES_SEL
:
318 proxy
->guest_features_sel
= 1;
320 proxy
->guest_features_sel
= 0;
323 case VIRTIO_MMIO_GUEST_PAGE_SIZE
:
324 if (!proxy
->legacy
) {
325 qemu_log_mask(LOG_GUEST_ERROR
,
326 "%s: write to legacy register (0x%"
327 HWADDR_PRIx
") in non-legacy mode\n",
331 proxy
->guest_page_shift
= ctz32(value
);
332 if (proxy
->guest_page_shift
> 31) {
333 proxy
->guest_page_shift
= 0;
335 trace_virtio_mmio_guest_page(value
, proxy
->guest_page_shift
);
337 case VIRTIO_MMIO_QUEUE_SEL
:
338 if (value
< VIRTIO_QUEUE_MAX
) {
339 vdev
->queue_sel
= value
;
342 case VIRTIO_MMIO_QUEUE_NUM
:
343 trace_virtio_mmio_queue_write(value
, VIRTQUEUE_MAX_SIZE
);
345 virtio_queue_set_num(vdev
, vdev
->queue_sel
, value
);
346 virtio_queue_update_rings(vdev
, vdev
->queue_sel
);
348 proxy
->vqs
[vdev
->queue_sel
].num
= value
;
351 case VIRTIO_MMIO_QUEUE_ALIGN
:
352 if (!proxy
->legacy
) {
353 qemu_log_mask(LOG_GUEST_ERROR
,
354 "%s: write to legacy register (0x%"
355 HWADDR_PRIx
") in non-legacy mode\n",
359 virtio_queue_set_align(vdev
, vdev
->queue_sel
, value
);
361 case VIRTIO_MMIO_QUEUE_PFN
:
362 if (!proxy
->legacy
) {
363 qemu_log_mask(LOG_GUEST_ERROR
,
364 "%s: write to legacy register (0x%"
365 HWADDR_PRIx
") in non-legacy mode\n",
372 virtio_queue_set_addr(vdev
, vdev
->queue_sel
,
373 value
<< proxy
->guest_page_shift
);
376 case VIRTIO_MMIO_QUEUE_READY
:
378 qemu_log_mask(LOG_GUEST_ERROR
,
379 "%s: write to non-legacy register (0x%"
380 HWADDR_PRIx
") in legacy mode\n",
385 virtio_queue_set_num(vdev
, vdev
->queue_sel
,
386 proxy
->vqs
[vdev
->queue_sel
].num
);
387 virtio_queue_set_rings(vdev
, vdev
->queue_sel
,
388 ((uint64_t)proxy
->vqs
[vdev
->queue_sel
].desc
[1]) << 32 |
389 proxy
->vqs
[vdev
->queue_sel
].desc
[0],
390 ((uint64_t)proxy
->vqs
[vdev
->queue_sel
].avail
[1]) << 32 |
391 proxy
->vqs
[vdev
->queue_sel
].avail
[0],
392 ((uint64_t)proxy
->vqs
[vdev
->queue_sel
].used
[1]) << 32 |
393 proxy
->vqs
[vdev
->queue_sel
].used
[0]);
394 proxy
->vqs
[vdev
->queue_sel
].enabled
= 1;
396 proxy
->vqs
[vdev
->queue_sel
].enabled
= 0;
399 case VIRTIO_MMIO_QUEUE_NOTIFY
:
400 if (value
< VIRTIO_QUEUE_MAX
) {
401 virtio_queue_notify(vdev
, value
);
404 case VIRTIO_MMIO_INTERRUPT_ACK
:
405 atomic_and(&vdev
->isr
, ~value
);
406 virtio_update_irq(vdev
);
408 case VIRTIO_MMIO_STATUS
:
409 if (!(value
& VIRTIO_CONFIG_S_DRIVER_OK
)) {
410 virtio_mmio_stop_ioeventfd(proxy
);
413 if (!proxy
->legacy
&& (value
& VIRTIO_CONFIG_S_FEATURES_OK
)) {
414 virtio_set_features(vdev
,
415 ((uint64_t)proxy
->guest_features
[1]) << 32 |
416 proxy
->guest_features
[0]);
419 virtio_set_status(vdev
, value
& 0xff);
421 if (value
& VIRTIO_CONFIG_S_DRIVER_OK
) {
422 virtio_mmio_start_ioeventfd(proxy
);
425 if (vdev
->status
== 0) {
429 case VIRTIO_MMIO_QUEUE_DESC_LOW
:
431 qemu_log_mask(LOG_GUEST_ERROR
,
432 "%s: write to non-legacy register (0x%"
433 HWADDR_PRIx
") in legacy mode\n",
437 proxy
->vqs
[vdev
->queue_sel
].desc
[0] = value
;
439 case VIRTIO_MMIO_QUEUE_DESC_HIGH
:
441 qemu_log_mask(LOG_GUEST_ERROR
,
442 "%s: write to non-legacy register (0x%"
443 HWADDR_PRIx
") in legacy mode\n",
447 proxy
->vqs
[vdev
->queue_sel
].desc
[1] = value
;
449 case VIRTIO_MMIO_QUEUE_AVAIL_LOW
:
451 qemu_log_mask(LOG_GUEST_ERROR
,
452 "%s: write to non-legacy register (0x%"
453 HWADDR_PRIx
") in legacy mode\n",
457 proxy
->vqs
[vdev
->queue_sel
].avail
[0] = value
;
459 case VIRTIO_MMIO_QUEUE_AVAIL_HIGH
:
461 qemu_log_mask(LOG_GUEST_ERROR
,
462 "%s: write to non-legacy register (0x%"
463 HWADDR_PRIx
") in legacy mode\n",
467 proxy
->vqs
[vdev
->queue_sel
].avail
[1] = value
;
469 case VIRTIO_MMIO_QUEUE_USED_LOW
:
471 qemu_log_mask(LOG_GUEST_ERROR
,
472 "%s: write to non-legacy register (0x%"
473 HWADDR_PRIx
") in legacy mode\n",
477 proxy
->vqs
[vdev
->queue_sel
].used
[0] = value
;
479 case VIRTIO_MMIO_QUEUE_USED_HIGH
:
481 qemu_log_mask(LOG_GUEST_ERROR
,
482 "%s: write to non-legacy register (0x%"
483 HWADDR_PRIx
") in legacy mode\n",
487 proxy
->vqs
[vdev
->queue_sel
].used
[1] = value
;
489 case VIRTIO_MMIO_MAGIC_VALUE
:
490 case VIRTIO_MMIO_VERSION
:
491 case VIRTIO_MMIO_DEVICE_ID
:
492 case VIRTIO_MMIO_VENDOR_ID
:
493 case VIRTIO_MMIO_DEVICE_FEATURES
:
494 case VIRTIO_MMIO_QUEUE_NUM_MAX
:
495 case VIRTIO_MMIO_INTERRUPT_STATUS
:
496 case VIRTIO_MMIO_CONFIG_GENERATION
:
497 qemu_log_mask(LOG_GUEST_ERROR
,
498 "%s: write to read-only register (0x%" HWADDR_PRIx
")\n",
503 qemu_log_mask(LOG_GUEST_ERROR
,
504 "%s: bad register offset (0x%" HWADDR_PRIx
")\n",
509 static const MemoryRegionOps virtio_legacy_mem_ops
= {
510 .read
= virtio_mmio_read
,
511 .write
= virtio_mmio_write
,
512 .endianness
= DEVICE_NATIVE_ENDIAN
,
515 static const MemoryRegionOps virtio_mem_ops
= {
516 .read
= virtio_mmio_read
,
517 .write
= virtio_mmio_write
,
518 .endianness
= DEVICE_LITTLE_ENDIAN
,
521 static void virtio_mmio_update_irq(DeviceState
*opaque
, uint16_t vector
)
523 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
524 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
530 level
= (atomic_read(&vdev
->isr
) != 0);
531 trace_virtio_mmio_setting_irq(level
);
532 qemu_set_irq(proxy
->irq
, level
);
535 static int virtio_mmio_load_config(DeviceState
*opaque
, QEMUFile
*f
)
537 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
539 proxy
->host_features_sel
= qemu_get_be32(f
);
540 proxy
->guest_features_sel
= qemu_get_be32(f
);
541 proxy
->guest_page_shift
= qemu_get_be32(f
);
545 static void virtio_mmio_save_config(DeviceState
*opaque
, QEMUFile
*f
)
547 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
549 qemu_put_be32(f
, proxy
->host_features_sel
);
550 qemu_put_be32(f
, proxy
->guest_features_sel
);
551 qemu_put_be32(f
, proxy
->guest_page_shift
);
554 static const VMStateDescription vmstate_virtio_mmio_queue_state
= {
555 .name
= "virtio_mmio/queue_state",
557 .minimum_version_id
= 1,
558 .fields
= (VMStateField
[]) {
559 VMSTATE_UINT16(num
, VirtIOMMIOQueue
),
560 VMSTATE_BOOL(enabled
, VirtIOMMIOQueue
),
561 VMSTATE_UINT32_ARRAY(desc
, VirtIOMMIOQueue
, 2),
562 VMSTATE_UINT32_ARRAY(avail
, VirtIOMMIOQueue
, 2),
563 VMSTATE_UINT32_ARRAY(used
, VirtIOMMIOQueue
, 2),
564 VMSTATE_END_OF_LIST()
568 static const VMStateDescription vmstate_virtio_mmio_state_sub
= {
569 .name
= "virtio_mmio/state",
571 .minimum_version_id
= 1,
572 .fields
= (VMStateField
[]) {
573 VMSTATE_UINT32_ARRAY(guest_features
, VirtIOMMIOProxy
, 2),
574 VMSTATE_STRUCT_ARRAY(vqs
, VirtIOMMIOProxy
, VIRTIO_QUEUE_MAX
, 0,
575 vmstate_virtio_mmio_queue_state
,
577 VMSTATE_END_OF_LIST()
581 static const VMStateDescription vmstate_virtio_mmio
= {
582 .name
= "virtio_mmio",
584 .minimum_version_id
= 1,
585 .minimum_version_id_old
= 1,
586 .fields
= (VMStateField
[]) {
587 VMSTATE_END_OF_LIST()
589 .subsections
= (const VMStateDescription
* []) {
590 &vmstate_virtio_mmio_state_sub
,
595 static void virtio_mmio_save_extra_state(DeviceState
*opaque
, QEMUFile
*f
)
597 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
599 vmstate_save_state(f
, &vmstate_virtio_mmio
, proxy
, NULL
);
602 static int virtio_mmio_load_extra_state(DeviceState
*opaque
, QEMUFile
*f
)
604 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
606 return vmstate_load_state(f
, &vmstate_virtio_mmio
, proxy
, 1);
609 static bool virtio_mmio_has_extra_state(DeviceState
*opaque
)
611 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
613 return !proxy
->legacy
;
616 static void virtio_mmio_reset(DeviceState
*d
)
618 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
621 virtio_mmio_stop_ioeventfd(proxy
);
622 virtio_bus_reset(&proxy
->bus
);
623 proxy
->host_features_sel
= 0;
624 proxy
->guest_features_sel
= 0;
625 proxy
->guest_page_shift
= 0;
627 if (!proxy
->legacy
) {
628 proxy
->guest_features
[0] = proxy
->guest_features
[1] = 0;
630 for (i
= 0; i
< VIRTIO_QUEUE_MAX
; i
++) {
631 proxy
->vqs
[i
].enabled
= 0;
632 proxy
->vqs
[i
].num
= 0;
633 proxy
->vqs
[i
].desc
[0] = proxy
->vqs
[i
].desc
[1] = 0;
634 proxy
->vqs
[i
].avail
[0] = proxy
->vqs
[i
].avail
[1] = 0;
635 proxy
->vqs
[i
].used
[0] = proxy
->vqs
[i
].used
[1] = 0;
640 static int virtio_mmio_set_guest_notifier(DeviceState
*d
, int n
, bool assign
,
643 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
644 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
645 VirtioDeviceClass
*vdc
= VIRTIO_DEVICE_GET_CLASS(vdev
);
646 VirtQueue
*vq
= virtio_get_queue(vdev
, n
);
647 EventNotifier
*notifier
= virtio_queue_get_guest_notifier(vq
);
650 int r
= event_notifier_init(notifier
, 0);
654 virtio_queue_set_guest_notifier_fd_handler(vq
, true, with_irqfd
);
656 virtio_queue_set_guest_notifier_fd_handler(vq
, false, with_irqfd
);
657 event_notifier_cleanup(notifier
);
660 if (vdc
->guest_notifier_mask
&& vdev
->use_guest_notifier_mask
) {
661 vdc
->guest_notifier_mask(vdev
, n
, !assign
);
667 static int virtio_mmio_set_guest_notifiers(DeviceState
*d
, int nvqs
,
670 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
671 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
672 /* TODO: need to check if kvm-arm supports irqfd */
673 bool with_irqfd
= false;
676 nvqs
= MIN(nvqs
, VIRTIO_QUEUE_MAX
);
678 for (n
= 0; n
< nvqs
; n
++) {
679 if (!virtio_queue_get_num(vdev
, n
)) {
683 r
= virtio_mmio_set_guest_notifier(d
, n
, assign
, with_irqfd
);
692 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
695 virtio_mmio_set_guest_notifier(d
, n
, !assign
, false);
700 static void virtio_mmio_pre_plugged(DeviceState
*d
, Error
**errp
)
702 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
703 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
705 if (!proxy
->legacy
) {
706 virtio_add_feature(&vdev
->host_features
, VIRTIO_F_VERSION_1
);
710 /* virtio-mmio device */
712 static Property virtio_mmio_properties
[] = {
713 DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy
,
714 format_transport_address
, true),
715 DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy
, legacy
, true),
716 DEFINE_PROP_END_OF_LIST(),
719 static void virtio_mmio_realizefn(DeviceState
*d
, Error
**errp
)
721 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
722 SysBusDevice
*sbd
= SYS_BUS_DEVICE(d
);
724 qbus_create_inplace(&proxy
->bus
, sizeof(proxy
->bus
), TYPE_VIRTIO_MMIO_BUS
,
726 sysbus_init_irq(sbd
, &proxy
->irq
);
728 memory_region_init_io(&proxy
->iomem
, OBJECT(d
),
729 &virtio_legacy_mem_ops
, proxy
,
730 TYPE_VIRTIO_MMIO
, 0x200);
732 memory_region_init_io(&proxy
->iomem
, OBJECT(d
),
733 &virtio_mem_ops
, proxy
,
734 TYPE_VIRTIO_MMIO
, 0x200);
736 sysbus_init_mmio(sbd
, &proxy
->iomem
);
739 static void virtio_mmio_class_init(ObjectClass
*klass
, void *data
)
741 DeviceClass
*dc
= DEVICE_CLASS(klass
);
743 dc
->realize
= virtio_mmio_realizefn
;
744 dc
->reset
= virtio_mmio_reset
;
745 set_bit(DEVICE_CATEGORY_MISC
, dc
->categories
);
746 dc
->props
= virtio_mmio_properties
;
749 static const TypeInfo virtio_mmio_info
= {
750 .name
= TYPE_VIRTIO_MMIO
,
751 .parent
= TYPE_SYS_BUS_DEVICE
,
752 .instance_size
= sizeof(VirtIOMMIOProxy
),
753 .class_init
= virtio_mmio_class_init
,
756 /* virtio-mmio-bus. */
758 static char *virtio_mmio_bus_get_dev_path(DeviceState
*dev
)
760 BusState
*virtio_mmio_bus
;
761 VirtIOMMIOProxy
*virtio_mmio_proxy
;
763 SysBusDevice
*proxy_sbd
;
766 virtio_mmio_bus
= qdev_get_parent_bus(dev
);
767 virtio_mmio_proxy
= VIRTIO_MMIO(virtio_mmio_bus
->parent
);
768 proxy_path
= qdev_get_dev_path(DEVICE(virtio_mmio_proxy
));
771 * If @format_transport_address is false, then we just perform the same as
772 * virtio_bus_get_dev_path(): we delegate the address formatting for the
773 * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
774 * (i.e., the device that implements the virtio-mmio bus) resides on. In
775 * this case the base address of the virtio-mmio transport will be
778 if (!virtio_mmio_proxy
->format_transport_address
) {
782 /* Otherwise, we append the base address of the transport. */
783 proxy_sbd
= SYS_BUS_DEVICE(virtio_mmio_proxy
);
784 assert(proxy_sbd
->num_mmio
== 1);
785 assert(proxy_sbd
->mmio
[0].memory
== &virtio_mmio_proxy
->iomem
);
788 path
= g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx
, proxy_path
,
789 proxy_sbd
->mmio
[0].addr
);
791 path
= g_strdup_printf("virtio-mmio@" TARGET_FMT_plx
,
792 proxy_sbd
->mmio
[0].addr
);
798 static void virtio_mmio_bus_class_init(ObjectClass
*klass
, void *data
)
800 BusClass
*bus_class
= BUS_CLASS(klass
);
801 VirtioBusClass
*k
= VIRTIO_BUS_CLASS(klass
);
803 k
->notify
= virtio_mmio_update_irq
;
804 k
->save_config
= virtio_mmio_save_config
;
805 k
->load_config
= virtio_mmio_load_config
;
806 k
->save_extra_state
= virtio_mmio_save_extra_state
;
807 k
->load_extra_state
= virtio_mmio_load_extra_state
;
808 k
->has_extra_state
= virtio_mmio_has_extra_state
;
809 k
->set_guest_notifiers
= virtio_mmio_set_guest_notifiers
;
810 k
->ioeventfd_enabled
= virtio_mmio_ioeventfd_enabled
;
811 k
->ioeventfd_assign
= virtio_mmio_ioeventfd_assign
;
812 k
->pre_plugged
= virtio_mmio_pre_plugged
;
813 k
->has_variable_vring_alignment
= true;
814 bus_class
->max_dev
= 1;
815 bus_class
->get_dev_path
= virtio_mmio_bus_get_dev_path
;
818 static const TypeInfo virtio_mmio_bus_info
= {
819 .name
= TYPE_VIRTIO_MMIO_BUS
,
820 .parent
= TYPE_VIRTIO_BUS
,
821 .instance_size
= sizeof(VirtioBusState
),
822 .class_init
= virtio_mmio_bus_class_init
,
825 static void virtio_mmio_register_types(void)
827 type_register_static(&virtio_mmio_bus_info
);
828 type_register_static(&virtio_mmio_info
);
831 type_init(virtio_mmio_register_types
)