qemu.git: hw/virtio/virtio-mmio.c
/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "qemu/host-utils.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/error-report.h"

/* #define DEBUG_VIRTIO_MMIO */

#ifdef DEBUG_VIRTIO_MMIO

#define DPRINTF(fmt, ...) \
do { printf("virtio_mmio: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

/* QOM macros */
/* virtio-mmio-bus */
#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
#define VIRTIO_MMIO_BUS(obj) \
        OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)

/* virtio-mmio */
#define TYPE_VIRTIO_MMIO "virtio-mmio"
#define VIRTIO_MMIO(obj) \
        OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)

/* Memory mapped register offsets */
#define VIRTIO_MMIO_MAGIC 0x0
#define VIRTIO_MMIO_VERSION 0x4
#define VIRTIO_MMIO_DEVICEID 0x8
#define VIRTIO_MMIO_VENDORID 0xc
#define VIRTIO_MMIO_HOSTFEATURES 0x10
#define VIRTIO_MMIO_HOSTFEATURESSEL 0x14
#define VIRTIO_MMIO_GUESTFEATURES 0x20
#define VIRTIO_MMIO_GUESTFEATURESSEL 0x24
#define VIRTIO_MMIO_GUESTPAGESIZE 0x28
#define VIRTIO_MMIO_QUEUESEL 0x30
#define VIRTIO_MMIO_QUEUENUMMAX 0x34
#define VIRTIO_MMIO_QUEUENUM 0x38
#define VIRTIO_MMIO_QUEUEALIGN 0x3c
#define VIRTIO_MMIO_QUEUEPFN 0x40
#define VIRTIO_MMIO_QUEUENOTIFY 0x50
#define VIRTIO_MMIO_INTERRUPTSTATUS 0x60
#define VIRTIO_MMIO_INTERRUPTACK 0x64
#define VIRTIO_MMIO_STATUS 0x70
/* Device specific config space starts here */
#define VIRTIO_MMIO_CONFIG 0x100

#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 1
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */

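/*
 * The register block above follows the legacy ("version 1") virtio-mmio
 * layout: transport registers live below 0x100 and the device-specific
 * configuration space starts at VIRTIO_MMIO_CONFIG. Guests identify the
 * transport by reading back VIRT_MAGIC ('virt'), VIRT_VERSION (1) and
 * VIRT_VENDOR ('QEMU').
 */
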
typedef struct {
    /* Generic */
    SysBusDevice parent_obj;
    MemoryRegion iomem;
    qemu_irq irq;
    /* Guest accessible state needing migration and reset */
    uint32_t host_features_sel;
    uint32_t guest_features_sel;
    uint32_t guest_page_shift;
    /* virtio-bus */
    VirtioBusState bus;
    bool ioeventfd_disabled;
    bool ioeventfd_started;
    bool format_transport_address;
} VirtIOMMIOProxy;

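/*
 * host_features_sel, guest_features_sel and guest_page_shift mirror
 * registers the guest programs directly, so they are saved and restored
 * by virtio_mmio_save_config()/virtio_mmio_load_config() and cleared
 * again in virtio_mmio_reset().
 */
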
static bool virtio_mmio_ioeventfd_started(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    return proxy->ioeventfd_started;
}

static void virtio_mmio_ioeventfd_set_started(DeviceState *d, bool started,
                                              bool err)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    proxy->ioeventfd_started = started;
}

static bool virtio_mmio_ioeventfd_disabled(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    return !kvm_eventfds_enabled() || proxy->ioeventfd_disabled;
}

static void virtio_mmio_ioeventfd_set_disabled(DeviceState *d, bool disabled)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    proxy->ioeventfd_disabled = disabled;
}

static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}

static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

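/*
 * When ioeventfds are in use, an eventfd is bound to 4-byte writes of the
 * queue index at VIRTIO_MMIO_QUEUENOTIFY (data match on 'n' above), so a
 * guest kick can wake the backend without going through
 * virtio_mmio_write().
 */
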
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_read offset 0x%x\n", (int)offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            return VIRT_VERSION;
        case VIRTIO_MMIO_VENDORID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        return VIRT_VERSION;
    case VIRTIO_MMIO_DEVICEID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDORID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_HOSTFEATURES:
        if (proxy->host_features_sel) {
            return 0;
        }
        return vdev->host_features;
    case VIRTIO_MMIO_QUEUENUMMAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUEPFN:
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        return vdev->isr;
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_HOSTFEATURESSEL:
    case VIRTIO_MMIO_GUESTFEATURES:
    case VIRTIO_MMIO_GUESTFEATURESSEL:
    case VIRTIO_MMIO_GUESTPAGESIZE:
    case VIRTIO_MMIO_QUEUESEL:
    case VIRTIO_MMIO_QUEUENUM:
    case VIRTIO_MMIO_QUEUEALIGN:
    case VIRTIO_MMIO_QUEUENOTIFY:
    case VIRTIO_MMIO_INTERRUPTACK:
        DPRINTF("read of write-only register\n");
        return 0;
    default:
        DPRINTF("bad register offset\n");
        return 0;
    }
    return 0;
}

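/*
 * Register writes implement the legacy programming model: the guest first
 * writes its page size to VIRTIO_MMIO_GUESTPAGESIZE, selects a queue with
 * VIRTIO_MMIO_QUEUESEL, and then publishes the ring by writing its page
 * frame number to VIRTIO_MMIO_QUEUEPFN; writing 0 to QUEUEPFN resets the
 * device.
 */
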
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
            (int)offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }
    if (size != 4) {
        DPRINTF("wrong size access to register!\n");
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_HOSTFEATURESSEL:
        proxy->host_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTFEATURES:
        if (!proxy->guest_features_sel) {
            virtio_set_features(vdev, value);
        }
        break;
    case VIRTIO_MMIO_GUESTFEATURESSEL:
        proxy->guest_features_sel = value;
        break;
    case VIRTIO_MMIO_GUESTPAGESIZE:
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
                proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUESEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUENUM:
        DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);
        /* Note: only call this function for legacy devices */
        virtio_queue_update_rings(vdev, vdev->queue_sel);
        break;
    case VIRTIO_MMIO_QUEUEALIGN:
        /* Note: this is only valid for legacy devices */
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUEPFN:
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUENOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPTACK:
        vdev->isr &= ~value;
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
        }
        break;
    case VIRTIO_MMIO_MAGIC:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICEID:
    case VIRTIO_MMIO_VENDORID:
    case VIRTIO_MMIO_HOSTFEATURES:
    case VIRTIO_MMIO_QUEUENUMMAX:
    case VIRTIO_MMIO_INTERRUPTSTATUS:
        DPRINTF("write to readonly register\n");
        break;

    default:
        DPRINTF("bad register offset\n");
    }
}

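/*
 * The register block below is mapped with DEVICE_NATIVE_ENDIAN, i.e. this
 * legacy transport presents its registers in the guest's native byte
 * order.
 */
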
static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (vdev->isr != 0);
    DPRINTF("virtio_mmio setting IRQ %d\n", level);
    qemu_set_irq(proxy->irq, level);
}

static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}

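/*
 * save_config/load_config are wired up as VirtioBusClass hooks below, so
 * the generic virtio migration code saves and restores the three proxy
 * registers in the same order on both sides of a migration.
 */
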
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;
}

static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

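/*
 * Guest notifiers are currently always serviced in QEMU (with_irqfd is
 * false): the event notifier's fd handler raises the interrupt via
 * virtio_mmio_update_irq() rather than injecting it directly through an
 * irqfd.
 */
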
/* virtio-mmio device */

static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy,
                          TYPE_VIRTIO_MMIO, 0x200);
    sysbus_init_mmio(sbd, &proxy->iomem);
}

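/*
 * The 0x200-byte MMIO window covers the transport registers (below 0x100)
 * plus 0x100 bytes of device-specific config space starting at
 * VIRTIO_MMIO_CONFIG; the board maps the sysbus MMIO region and connects
 * the IRQ line.
 */
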
static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_mmio_properties;
}

static const TypeInfo virtio_mmio_info = {
    .name = TYPE_VIRTIO_MMIO,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init = virtio_mmio_class_init,
};

/* virtio-mmio-bus. */

static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    SysBusDevice *proxy_sbd;
    char *path;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy);
    assert(proxy_sbd->num_mmio == 1);
    assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               proxy_sbd->mmio[0].addr);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               proxy_sbd->mmio[0].addr);
    }
    g_free(proxy_path);
    return path;
}

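/*
 * With format_transport_address enabled (the default), a device path thus
 * ends in "virtio-mmio@<base address>", e.g. "virtio-mmio@a000000" for a
 * transport mapped at 0xa000000 (the address itself is board-specific).
 */
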
static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_started = virtio_mmio_ioeventfd_started;
    k->ioeventfd_set_started = virtio_mmio_ioeventfd_set_started;
    k->ioeventfd_disabled = virtio_mmio_ioeventfd_disabled;
    k->ioeventfd_set_disabled = virtio_mmio_ioeventfd_set_disabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}

static const TypeInfo virtio_mmio_bus_info = {
    .name = TYPE_VIRTIO_MMIO_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)