/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
22 #include "qemu/osdep.h"
23 #include "hw/sysbus.h"
24 #include "hw/virtio/virtio.h"
25 #include "qemu/host-utils.h"
26 #include "sysemu/kvm.h"
27 #include "hw/virtio/virtio-bus.h"
28 #include "qemu/error-report.h"

/* #define DEBUG_VIRTIO_MMIO */

#ifdef DEBUG_VIRTIO_MMIO

#define DPRINTF(fmt, ...) \
do { printf("virtio_mmio: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
#define VIRTIO_MMIO_BUS(obj) \
        OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)

#define TYPE_VIRTIO_MMIO "virtio-mmio"
#define VIRTIO_MMIO(obj) \
        OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)

/* Memory mapped register offsets */
#define VIRTIO_MMIO_MAGIC 0x0
#define VIRTIO_MMIO_VERSION 0x4
#define VIRTIO_MMIO_DEVICEID 0x8
#define VIRTIO_MMIO_VENDORID 0xc
#define VIRTIO_MMIO_HOSTFEATURES 0x10
#define VIRTIO_MMIO_HOSTFEATURESSEL 0x14
#define VIRTIO_MMIO_GUESTFEATURES 0x20
#define VIRTIO_MMIO_GUESTFEATURESSEL 0x24
#define VIRTIO_MMIO_GUESTPAGESIZE 0x28
#define VIRTIO_MMIO_QUEUESEL 0x30
#define VIRTIO_MMIO_QUEUENUMMAX 0x34
#define VIRTIO_MMIO_QUEUENUM 0x38
#define VIRTIO_MMIO_QUEUEALIGN 0x3c
#define VIRTIO_MMIO_QUEUEPFN 0x40
#define VIRTIO_MMIO_QUEUENOTIFY 0x50
#define VIRTIO_MMIO_INTERRUPTSTATUS 0x60
#define VIRTIO_MMIO_INTERRUPTACK 0x64
#define VIRTIO_MMIO_STATUS 0x70
/* Device specific config space starts here */
#define VIRTIO_MMIO_CONFIG 0x100

#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 1
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
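
/*
 * Note: this file implements the legacy (version 1) virtio-mmio register
 * layout: features are exchanged through the 32-bit HOSTFEATURES/
 * GUESTFEATURES banks and queue placement goes through the page-based
 * QUEUEPFN register defined above. The VIRT_* constants are the values the
 * MAGIC, VERSION and VENDORID registers report to the guest.
 */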

typedef struct {
    /* Generic */
    SysBusDevice parent_obj;
    MemoryRegion iomem;
    qemu_irq irq;
    /* Guest accessible state needing migration and reset */
    uint32_t host_features_sel;
    uint32_t guest_features_sel;
    uint32_t guest_page_shift;
    /* virtio-bus */
    VirtioBusState bus;
    bool format_transport_address;
} VirtIOMMIOProxy;
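
/*
 * guest_page_shift caches log2 of the page size the guest announces via
 * VIRTIO_MMIO_GUESTPAGESIZE; the legacy QUEUEPFN register then exchanges
 * queue addresses as page frame numbers scaled by that shift.
 */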

static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    return kvm_eventfds_enabled();
}

static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}

static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
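
/*
 * With ioeventfd enabled, guest writes to VIRTIO_MMIO_QUEUENOTIFY are
 * matched against the eventfds registered above and serviced outside the
 * vCPU thread instead of trapping into virtio_mmio_write() below.
 */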

static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_read offset 0x%x\n", (int)offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            return VIRT_VERSION;
        case VIRTIO_MMIO_VENDORID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }
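
    /* Offsets at or above VIRTIO_MMIO_CONFIG fall in the device-specific
     * configuration space and are forwarded to the backend at byte, word
     * or long granularity; everything below it is a 32-bit transport
     * register.
     */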
    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return 0;
    }

    switch (offset) {
    case VIRTIO_MMIO_MAGIC:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        return VIRT_VERSION;
    case VIRTIO_MMIO_DEVICEID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDORID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_HOSTFEATURES:
        if (proxy->host_features_sel) {
            return 0;
        }
        return vdev->host_features;
    case VIRTIO_MMIO_QUEUENUMMAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUEPFN:
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        return atomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_HOSTFEATURESSEL:
    case VIRTIO_MMIO_GUESTFEATURES:
    case VIRTIO_MMIO_GUESTFEATURESSEL:
    case VIRTIO_MMIO_GUESTPAGESIZE:
    case VIRTIO_MMIO_QUEUESEL:
    case VIRTIO_MMIO_QUEUENUM:
    case VIRTIO_MMIO_QUEUEALIGN:
    case VIRTIO_MMIO_QUEUENOTIFY:
    case VIRTIO_MMIO_INTERRUPTACK:
        DPRINTF("read of write-only register\n");
        return 0;
    default:
        DPRINTF("bad register offset\n");
        return 0;
    }
    return 0;
}

static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
            (int)offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return;
    }
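
    /* The legacy transport only has 32 feature bits, so only selector
     * bank 0 is meaningful: reads of HOSTFEATURES with a non-zero
     * selector return 0 above, and GUESTFEATURES writes below are
     * ignored unless the guest's selector is 0.
     */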
    switch (offset) {
    case VIRTIO_MMIO_HOSTFEATURESSEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTFEATURES:
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_GUESTFEATURESSEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTPAGESIZE:
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
                proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUESEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUENUM:
        DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUEALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUEPFN:
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUENOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPTACK:
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICEID:
    case VIRTIO_MMIO_VENDORID:
    case VIRTIO_MMIO_HOSTFEATURES:
    case VIRTIO_MMIO_QUEUENUMMAX:
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        DPRINTF("write to readonly register\n");
        break;

    default:
        DPRINTF("bad register offset\n");
    }
}

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
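
/*
 * DEVICE_NATIVE_ENDIAN means accesses to this region are not byte-swapped
 * relative to the guest CPU, which is what the legacy virtio-mmio transport
 * (guest-natural endianness) expects.
 */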

static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (atomic_read(&vdev->isr) != 0);
    DPRINTF("virtio_mmio setting IRQ %d\n", level);
    qemu_set_irq(proxy->irq, level);
}

static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}
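
/*
 * The three u32s above are the only transport-level state carried in the
 * migration stream; load_config reads them back in exactly the order
 * save_config wrote them.
 */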

static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;
}

static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

/* virtio-mmio device */

static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_END_OF_LIST(),
};
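
/*
 * format_transport_address defaults to true so that each proxy gets a
 * device path that embeds its MMIO base address (see
 * virtio_mmio_bus_get_dev_path() below), keeping paths unique when a board
 * instantiates several virtio-mmio transports; boards can clear it (for
 * example via compat properties) to preserve the older, address-less format.
 */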

static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
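    /* 0x200 bytes covers the 0x100-byte register block plus the first
     * 0x100 bytes of the device-specific config space starting at
     * VIRTIO_MMIO_CONFIG.
     */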
    memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy,
                          TYPE_VIRTIO_MMIO, 0x200);
    sysbus_init_mmio(sbd, &proxy->iomem);
}

static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_mmio_properties;
}

static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};

/* virtio-mmio-bus. */

static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    SysBusDevice *proxy_sbd;
    char *path;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy);
    assert(proxy_sbd->num_mmio == 1);
    assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem);
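
    /* With format_transport_address enabled, the returned path looks like
     * "<proxy path>/virtio-mmio@000000000a003e00" (the address shown here
     * is purely illustrative).
     */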
    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               proxy_sbd->mmio[0].addr);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               proxy_sbd->mmio[0].addr);
    }
    g_free(proxy_path);
    return path;
}

static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}

static const TypeInfo virtio_mmio_bus_info = {
    .name          = TYPE_VIRTIO_MMIO_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init    = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)