4 * Copyright (c) 2011 Linaro Limited
7 * Peter Maydell <peter.maydell@linaro.org>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License; either version 2
11 * of the License, or (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
23 #include "standard-headers/linux/virtio_mmio.h"
25 #include "hw/qdev-properties.h"
26 #include "hw/sysbus.h"
27 #include "hw/virtio/virtio.h"
28 #include "migration/qemu-file-types.h"
29 #include "qemu/host-utils.h"
30 #include "qemu/module.h"
31 #include "sysemu/kvm.h"
32 #include "hw/virtio/virtio-mmio.h"
33 #include "qemu/error-report.h"
37 static bool virtio_mmio_ioeventfd_enabled(DeviceState
*d
)
39 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(d
);
41 return (proxy
->flags
& VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD
) != 0;
/*
 * Attach or detach an ioeventfd for queue @n on the QUEUE_NOTIFY register.
 * The 4-byte eventfd match is installed on the proxy's MMIO window so guest
 * writes of the queue index kick the backend without a VM exit.
 * Always returns 0 (the bus-level API expects an int result).
 */
static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}
60 static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy
*proxy
)
62 virtio_bus_start_ioeventfd(&proxy
->bus
);
65 static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy
*proxy
)
67 virtio_bus_stop_ioeventfd(&proxy
->bus
);
/*
 * Soft reset of the transport-local queue state: mark every queue disabled.
 * Used when the guest writes 0 to the status register (device reset) without
 * tearing down the whole proxy.
 */
static void virtio_mmio_soft_reset(VirtIOMMIOProxy *proxy)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
    }
}
/*
 * MMIO read dispatch for the virtio-mmio register window.
 *
 * @opaque: the VirtIOMMIOProxy backing this region
 * @offset: register offset within the 0x200-byte window
 * @size:   access width in bytes
 *
 * Offsets at or above VIRTIO_MMIO_CONFIG fall through to the device config
 * space; everything below is a transport register, which (except for config
 * space) must be accessed with 4-byte width. proxy->legacy selects between
 * the virtio-mmio v1 (legacy) and v2 (modern) register semantics.
 */
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            if (proxy->legacy) {
                return VIRT_VERSION_LEGACY;
            } else {
                return VIRT_VERSION;
            }
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        /* Legacy config space is guest-endian; modern is little-endian. */
        if (proxy->legacy) {
            switch (size) {
            case 1:
                return virtio_config_readb(vdev, offset);
            case 2:
                return virtio_config_readw(vdev, offset);
            case 4:
                return virtio_config_readl(vdev, offset);
            default:
                abort();
            }
        }

        switch (size) {
        case 1:
            return virtio_config_modern_readb(vdev, offset);
        case 2:
            return virtio_config_modern_readw(vdev, offset);
        case 4:
            return virtio_config_modern_readl(vdev, offset);
        default:
            abort();
        }
    }

    /* Transport registers are 32-bit only. */
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }

    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        if (proxy->legacy) {
            return VIRT_VERSION_LEGACY;
        } else {
            return VIRT_VERSION;
        }
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        if (proxy->legacy) {
            /* Legacy exposes only the low 32 feature bits. */
            if (proxy->host_features_sel) {
                return 0;
            } else {
                return vdev->host_features;
            }
        } else {
            /* Modern: mask out legacy-only features, return selected word. */
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
            return (vdev->host_features & ~vdc->legacy_features)
                >> (32 * proxy->host_features_sel);
        }
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return proxy->vqs[vdev->queue_sel].enabled;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return qatomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_CONFIG_GENERATION:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return vdev->generation;
    case VIRTIO_MMIO_SHM_LEN_LOW:
    case VIRTIO_MMIO_SHM_LEN_HIGH:
        /*
         * VIRTIO_MMIO_SHM_SEL is unimplemented
         * according to the linux driver, if region length is -1
         * the shared memory doesn't exist
         */
        return -1;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
    case VIRTIO_MMIO_QUEUE_USED_LOW:
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register (0x%" HWADDR_PRIx
                      ")\n", __func__, offset);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    }
    return 0;
}
/*
 * MMIO write dispatch for the virtio-mmio register window.
 *
 * @opaque: the VirtIOMMIOProxy backing this region
 * @offset: register offset within the 0x200-byte window
 * @value:  value written by the guest
 * @size:   access width in bytes
 *
 * Mirrors virtio_mmio_read(): config space above VIRTIO_MMIO_CONFIG,
 * 4-byte-only transport registers below it, with proxy->legacy selecting
 * v1 (legacy) versus v2 (modern) semantics.
 */
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        /* Legacy config space is guest-endian; modern is little-endian. */
        if (proxy->legacy) {
            switch (size) {
            case 1:
                virtio_config_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        } else {
            switch (size) {
            case 1:
                virtio_config_modern_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_modern_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_modern_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        }
    }

    /* Transport registers are 32-bit only. */
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }

    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        /* Only feature words 0 and 1 exist; clamp the selector. */
        if (value) {
            proxy->host_features_sel = 1;
        } else {
            proxy->host_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        if (proxy->legacy) {
            if (proxy->guest_features_sel) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: attempt to write guest features with "
                              "guest_features_sel > 0 in legacy mode\n",
                              __func__);
            } else {
                virtio_set_features(vdev, value);
            }
        } else {
            /* Modern: latch the word; applied on FEATURES_OK (see STATUS). */
            proxy->guest_features[proxy->guest_features_sel] = value;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        if (value) {
            proxy->guest_features_sel = 1;
        } else {
            proxy->guest_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        /* Page size must be a power of two; shift > 31 means value was 0. */
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);

        if (proxy->legacy) {
            virtio_queue_update_rings(vdev, vdev->queue_sel);
        } else {
            /* Modern: rings are laid out when QUEUE_READY is written. */
            proxy->vqs[vdev->queue_sel].num = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value == 0) {
            /* Writing PFN 0 is the legacy device-reset request. */
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value) {
            /* Commit the latched size and ring addresses for this queue. */
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        qatomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        /* Modern: FEATURES_OK is the point where latched features apply. */
        if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
            virtio_set_features(vdev,
                                ((uint64_t)proxy->guest_features[1]) << 32 |
                                proxy->guest_features[0]);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        /* Status 0 means the guest requested a full device reset. */
        if (vdev->status == 0) {
            virtio_reset(vdev);
            virtio_mmio_soft_reset(proxy);
        }
        break;
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[1] = value;
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
    case VIRTIO_MMIO_CONFIG_GENERATION:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to read-only register (0x%" HWADDR_PRIx
                      ")\n", __func__, offset);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
    }
}
/* Legacy (virtio-mmio v1) register window: accesses are guest-native endian. */
static const MemoryRegionOps virtio_legacy_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Modern (virtio 1.0) register window: the spec mandates little-endian. */
static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Bus "notify" callback: drive the single virtio-mmio interrupt line from
 * the device's ISR. @vector is unused — this transport has one IRQ.
 */
static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    /* Level is simply "any ISR bit set". */
    level = (qatomic_read(&vdev->isr) != 0);
    trace_virtio_mmio_setting_irq(level);
    qemu_set_irq(proxy->irq, level);
}
544 static int virtio_mmio_load_config(DeviceState
*opaque
, QEMUFile
*f
)
546 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
548 proxy
->host_features_sel
= qemu_get_be32(f
);
549 proxy
->guest_features_sel
= qemu_get_be32(f
);
550 proxy
->guest_page_shift
= qemu_get_be32(f
);
554 static void virtio_mmio_save_config(DeviceState
*opaque
, QEMUFile
*f
)
556 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
558 qemu_put_be32(f
, proxy
->host_features_sel
);
559 qemu_put_be32(f
, proxy
->guest_features_sel
);
560 qemu_put_be32(f
, proxy
->guest_page_shift
);
/* Per-queue migrated state (modern mode latched ring layout). */
static const VMStateDescription vmstate_virtio_mmio_queue_state = {
    .name = "virtio_mmio/queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOMMIOQueue),
        VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};
/* Modern-mode proxy state: latched guest features plus all queue state. */
static const VMStateDescription vmstate_virtio_mmio_state_sub = {
    .name = "virtio_mmio/state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_mmio_queue_state,
                             VirtIOMMIOQueue),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Top-level extra-state container: empty field list, with the real state
 * carried in the subsection so older streams without it remain loadable.
 */
static const VMStateDescription vmstate_virtio_mmio = {
    .name = "virtio_mmio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_virtio_mmio_state_sub,
        NULL
    }
};
604 static void virtio_mmio_save_extra_state(DeviceState
*opaque
, QEMUFile
*f
)
606 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
608 vmstate_save_state(f
, &vmstate_virtio_mmio
, proxy
, NULL
);
611 static int virtio_mmio_load_extra_state(DeviceState
*opaque
, QEMUFile
*f
)
613 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
615 return vmstate_load_state(f
, &vmstate_virtio_mmio
, proxy
, 1);
618 static bool virtio_mmio_has_extra_state(DeviceState
*opaque
)
620 VirtIOMMIOProxy
*proxy
= VIRTIO_MMIO(opaque
);
622 return !proxy
->legacy
;
/*
 * Full device reset: stop ioeventfd, reset the bus/device, and clear all
 * transport-local register state back to power-on defaults.
 */
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    int i;

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;

    if (!proxy->legacy) {
        /* Latched feature words and ring layouts only exist in modern mode. */
        proxy->guest_features[0] = proxy->guest_features[1] = 0;

        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].enabled = 0;
            proxy->vqs[i].num = 0;
            proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
            proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
            proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
        }
    }
}
/*
 * Set up or tear down the guest notifier for queue @n.
 * On assign, the notifier is initialized and hooked to the queue's fd
 * handler; on deassign, the handler is removed and the notifier cleaned up.
 * Returns 0 on success, or the negative error from event_notifier_init().
 */
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    /* Let the device mask/unmask its own notifications if it supports it. */
    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
/*
 * Assign or deassign guest notifiers for the first @nvqs queues.
 * Stops early at the first unconfigured queue; on assignment failure,
 * rolls back the queues already assigned. Returns 0 or a negative error.
 */
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}
/*
 * Called before a backend is plugged: a modern (non-legacy) transport
 * advertises VIRTIO_F_VERSION_1 on behalf of the device.
 */
static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->legacy) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}
719 /* virtio-mmio device */
/* User-configurable properties; note force-legacy defaults to true. */
static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
    DEFINE_PROP_BIT("ioeventfd", VirtIOMMIOProxy, flags,
                    VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Realize the proxy: create the virtio bus in place, wire the IRQ, and
 * register the 0x200-byte MMIO window with legacy or modern register ops
 * depending on the force-legacy property.
 */
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);

    /* ioeventfd needs kernel eventfd support; silently drop it otherwise. */
    if (!kvm_eventfds_enabled()) {
        proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
    }

    if (proxy->legacy) {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_legacy_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    } else {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    }
    sysbus_init_mmio(sbd, &proxy->iomem);
}
/* Class init for the proxy device: hook realize/reset and properties. */
static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, virtio_mmio_properties);
}
/* QOM registration for the virtio-mmio proxy (a sysbus device). */
static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};
772 /* virtio-mmio-bus. */
/*
 * Build the migration device path for a device on this virtio-mmio bus.
 * Returns a newly allocated string the caller must g_free().
 */
static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    char *path;
    MemoryRegionSection section;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    section = memory_region_find(&virtio_mmio_proxy->iomem, 0, 0x200);
    assert(section.mr);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               section.offset_within_address_space);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               section.offset_within_address_space);
    }
    /* memory_region_find() took a reference on the region; drop it. */
    memory_region_unref(section.mr);

    g_free(proxy_path);
    return path;
}
/* Wire the virtio-bus callbacks to this transport's implementations. */
static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->save_extra_state = virtio_mmio_save_extra_state;
    k->load_extra_state = virtio_mmio_load_extra_state;
    k->has_extra_state = virtio_mmio_has_extra_state;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->pre_plugged = virtio_mmio_pre_plugged;
    /* Legacy mode allows the guest to choose the vring alignment. */
    k->has_variable_vring_alignment = true;
    /* One backend device per transport. */
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}
/* QOM registration for the bus type the proxy device exposes. */
static const TypeInfo virtio_mmio_bus_info = {
    .name          = TYPE_VIRTIO_MMIO_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init    = virtio_mmio_bus_class_init,
};
/* Register both QOM types with the type system at startup. */
static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)