vfio/pci: Make interrupt bypass runtime configurable
[qemu/ar7.git] / hw / virtio / virtio-mmio.c
blob18660b07b1c9507d69bc44272f7f6ab98e09fce0
/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "sysemu/kvm.h"
29 /* #define DEBUG_VIRTIO_MMIO */
31 #ifdef DEBUG_VIRTIO_MMIO
33 #define DPRINTF(fmt, ...) \
34 do { printf("virtio_mmio: " fmt , ## __VA_ARGS__); } while (0)
35 #else
36 #define DPRINTF(fmt, ...) do {} while (0)
37 #endif
39 /* QOM macros */
40 /* virtio-mmio-bus */
41 #define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
42 #define VIRTIO_MMIO_BUS(obj) \
43 OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
44 #define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
45 OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
46 #define VIRTIO_MMIO_BUS_CLASS(klass) \
47 OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)
49 /* virtio-mmio */
50 #define TYPE_VIRTIO_MMIO "virtio-mmio"
51 #define VIRTIO_MMIO(obj) \
52 OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)
54 /* Memory mapped register offsets */
55 #define VIRTIO_MMIO_MAGIC 0x0
56 #define VIRTIO_MMIO_VERSION 0x4
57 #define VIRTIO_MMIO_DEVICEID 0x8
58 #define VIRTIO_MMIO_VENDORID 0xc
59 #define VIRTIO_MMIO_HOSTFEATURES 0x10
60 #define VIRTIO_MMIO_HOSTFEATURESSEL 0x14
61 #define VIRTIO_MMIO_GUESTFEATURES 0x20
62 #define VIRTIO_MMIO_GUESTFEATURESSEL 0x24
63 #define VIRTIO_MMIO_GUESTPAGESIZE 0x28
64 #define VIRTIO_MMIO_QUEUESEL 0x30
65 #define VIRTIO_MMIO_QUEUENUMMAX 0x34
66 #define VIRTIO_MMIO_QUEUENUM 0x38
67 #define VIRTIO_MMIO_QUEUEALIGN 0x3c
68 #define VIRTIO_MMIO_QUEUEPFN 0x40
69 #define VIRTIO_MMIO_QUEUENOTIFY 0x50
70 #define VIRTIO_MMIO_INTERRUPTSTATUS 0x60
71 #define VIRTIO_MMIO_INTERRUPTACK 0x64
72 #define VIRTIO_MMIO_STATUS 0x70
73 /* Device specific config space starts here */
74 #define VIRTIO_MMIO_CONFIG 0x100
76 #define VIRT_MAGIC 0x74726976 /* 'virt' */
77 #define VIRT_VERSION 1
78 #define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
80 typedef struct {
81 /* Generic */
82 SysBusDevice parent_obj;
83 MemoryRegion iomem;
84 qemu_irq irq;
85 /* Guest accessible state needing migration and reset */
86 uint32_t host_features_sel;
87 uint32_t guest_features_sel;
88 uint32_t guest_page_shift;
89 /* virtio-bus */
90 VirtioBusState bus;
91 bool ioeventfd_disabled;
92 bool ioeventfd_started;
93 } VirtIOMMIOProxy;
95 static int virtio_mmio_set_host_notifier_internal(VirtIOMMIOProxy *proxy,
96 int n, bool assign,
97 bool set_handler)
99 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
100 VirtQueue *vq = virtio_get_queue(vdev, n);
101 EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
102 int r = 0;
104 if (assign) {
105 r = event_notifier_init(notifier, 1);
106 if (r < 0) {
107 error_report("%s: unable to init event notifier: %d",
108 __func__, r);
109 return r;
111 virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
112 memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
113 true, n, notifier);
114 } else {
115 memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
116 true, n, notifier);
117 virtio_queue_set_host_notifier_fd_handler(vq, false, false);
118 event_notifier_cleanup(notifier);
120 return r;
123 static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
125 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
126 int n, r;
128 if (!kvm_eventfds_enabled() ||
129 proxy->ioeventfd_disabled ||
130 proxy->ioeventfd_started) {
131 return;
134 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
135 if (!virtio_queue_get_num(vdev, n)) {
136 continue;
139 r = virtio_mmio_set_host_notifier_internal(proxy, n, true, true);
140 if (r < 0) {
141 goto assign_error;
144 proxy->ioeventfd_started = true;
145 return;
147 assign_error:
148 while (--n >= 0) {
149 if (!virtio_queue_get_num(vdev, n)) {
150 continue;
153 r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
154 assert(r >= 0);
156 proxy->ioeventfd_started = false;
157 error_report("%s: failed. Fallback to a userspace (slower).", __func__);
160 static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
162 int r;
163 int n;
164 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
166 if (!proxy->ioeventfd_started) {
167 return;
170 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
171 if (!virtio_queue_get_num(vdev, n)) {
172 continue;
175 r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
176 assert(r >= 0);
178 proxy->ioeventfd_started = false;
181 static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
183 VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
184 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
186 DPRINTF("virtio_mmio_read offset 0x%x\n", (int)offset);
188 if (!vdev) {
189 /* If no backend is present, we treat most registers as
190 * read-as-zero, except for the magic number, version and
191 * vendor ID. This is not strictly sanctioned by the virtio
192 * spec, but it allows us to provide transports with no backend
193 * plugged in which don't confuse Linux's virtio code: the
194 * probe won't complain about the bad magic number, but the
195 * device ID of zero means no backend will claim it.
197 switch (offset) {
198 case VIRTIO_MMIO_MAGIC:
199 return VIRT_MAGIC;
200 case VIRTIO_MMIO_VERSION:
201 return VIRT_VERSION;
202 case VIRTIO_MMIO_VENDORID:
203 return VIRT_VENDOR;
204 default:
205 return 0;
209 if (offset >= VIRTIO_MMIO_CONFIG) {
210 offset -= VIRTIO_MMIO_CONFIG;
211 switch (size) {
212 case 1:
213 return virtio_config_readb(vdev, offset);
214 case 2:
215 return virtio_config_readw(vdev, offset);
216 case 4:
217 return virtio_config_readl(vdev, offset);
218 default:
219 abort();
222 if (size != 4) {
223 DPRINTF("wrong size access to register!\n");
224 return 0;
226 switch (offset) {
227 case VIRTIO_MMIO_MAGIC:
228 return VIRT_MAGIC;
229 case VIRTIO_MMIO_VERSION:
230 return VIRT_VERSION;
231 case VIRTIO_MMIO_DEVICEID:
232 return vdev->device_id;
233 case VIRTIO_MMIO_VENDORID:
234 return VIRT_VENDOR;
235 case VIRTIO_MMIO_HOSTFEATURES:
236 if (proxy->host_features_sel) {
237 return 0;
239 return vdev->host_features;
240 case VIRTIO_MMIO_QUEUENUMMAX:
241 if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
242 return 0;
244 return VIRTQUEUE_MAX_SIZE;
245 case VIRTIO_MMIO_QUEUEPFN:
246 return virtio_queue_get_addr(vdev, vdev->queue_sel)
247 >> proxy->guest_page_shift;
248 case VIRTIO_MMIO_INTERRUPTSTATUS:
249 return vdev->isr;
250 case VIRTIO_MMIO_STATUS:
251 return vdev->status;
252 case VIRTIO_MMIO_HOSTFEATURESSEL:
253 case VIRTIO_MMIO_GUESTFEATURES:
254 case VIRTIO_MMIO_GUESTFEATURESSEL:
255 case VIRTIO_MMIO_GUESTPAGESIZE:
256 case VIRTIO_MMIO_QUEUESEL:
257 case VIRTIO_MMIO_QUEUENUM:
258 case VIRTIO_MMIO_QUEUEALIGN:
259 case VIRTIO_MMIO_QUEUENOTIFY:
260 case VIRTIO_MMIO_INTERRUPTACK:
261 DPRINTF("read of write-only register\n");
262 return 0;
263 default:
264 DPRINTF("bad register offset\n");
265 return 0;
267 return 0;
270 static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
271 unsigned size)
273 VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
274 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
276 DPRINTF("virtio_mmio_write offset 0x%x value 0x%" PRIx64 "\n",
277 (int)offset, value);
279 if (!vdev) {
280 /* If no backend is present, we just make all registers
281 * write-ignored. This allows us to provide transports with
282 * no backend plugged in.
284 return;
287 if (offset >= VIRTIO_MMIO_CONFIG) {
288 offset -= VIRTIO_MMIO_CONFIG;
289 switch (size) {
290 case 1:
291 virtio_config_writeb(vdev, offset, value);
292 break;
293 case 2:
294 virtio_config_writew(vdev, offset, value);
295 break;
296 case 4:
297 virtio_config_writel(vdev, offset, value);
298 break;
299 default:
300 abort();
302 return;
304 if (size != 4) {
305 DPRINTF("wrong size access to register!\n");
306 return;
308 switch (offset) {
309 case VIRTIO_MMIO_HOSTFEATURESSEL:
310 proxy->host_features_sel = value;
311 break;
312 case VIRTIO_MMIO_GUESTFEATURES:
313 if (!proxy->guest_features_sel) {
314 virtio_set_features(vdev, value);
316 break;
317 case VIRTIO_MMIO_GUESTFEATURESSEL:
318 proxy->guest_features_sel = value;
319 break;
320 case VIRTIO_MMIO_GUESTPAGESIZE:
321 proxy->guest_page_shift = ctz32(value);
322 if (proxy->guest_page_shift > 31) {
323 proxy->guest_page_shift = 0;
325 DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
326 proxy->guest_page_shift);
327 break;
328 case VIRTIO_MMIO_QUEUESEL:
329 if (value < VIRTIO_QUEUE_MAX) {
330 vdev->queue_sel = value;
332 break;
333 case VIRTIO_MMIO_QUEUENUM:
334 DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
335 virtio_queue_set_num(vdev, vdev->queue_sel, value);
336 /* Note: only call this function for legacy devices */
337 virtio_queue_update_rings(vdev, vdev->queue_sel);
338 break;
339 case VIRTIO_MMIO_QUEUEALIGN:
340 /* Note: this is only valid for legacy devices */
341 virtio_queue_set_align(vdev, vdev->queue_sel, value);
342 break;
343 case VIRTIO_MMIO_QUEUEPFN:
344 if (value == 0) {
345 virtio_reset(vdev);
346 } else {
347 virtio_queue_set_addr(vdev, vdev->queue_sel,
348 value << proxy->guest_page_shift);
350 break;
351 case VIRTIO_MMIO_QUEUENOTIFY:
352 if (value < VIRTIO_QUEUE_MAX) {
353 virtio_queue_notify(vdev, value);
355 break;
356 case VIRTIO_MMIO_INTERRUPTACK:
357 vdev->isr &= ~value;
358 virtio_update_irq(vdev);
359 break;
360 case VIRTIO_MMIO_STATUS:
361 if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
362 virtio_mmio_stop_ioeventfd(proxy);
365 virtio_set_status(vdev, value & 0xff);
367 if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
368 virtio_mmio_start_ioeventfd(proxy);
371 if (vdev->status == 0) {
372 virtio_reset(vdev);
374 break;
375 case VIRTIO_MMIO_MAGIC:
376 case VIRTIO_MMIO_VERSION:
377 case VIRTIO_MMIO_DEVICEID:
378 case VIRTIO_MMIO_VENDORID:
379 case VIRTIO_MMIO_HOSTFEATURES:
380 case VIRTIO_MMIO_QUEUENUMMAX:
381 case VIRTIO_MMIO_INTERRUPTSTATUS:
382 DPRINTF("write to readonly register\n");
383 break;
385 default:
386 DPRINTF("bad register offset\n");
390 static const MemoryRegionOps virtio_mem_ops = {
391 .read = virtio_mmio_read,
392 .write = virtio_mmio_write,
393 .endianness = DEVICE_NATIVE_ENDIAN,
396 static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
398 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
399 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
400 int level;
402 if (!vdev) {
403 return;
405 level = (vdev->isr != 0);
406 DPRINTF("virtio_mmio setting IRQ %d\n", level);
407 qemu_set_irq(proxy->irq, level);
410 static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
412 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
414 proxy->host_features_sel = qemu_get_be32(f);
415 proxy->guest_features_sel = qemu_get_be32(f);
416 proxy->guest_page_shift = qemu_get_be32(f);
417 return 0;
420 static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
422 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
424 qemu_put_be32(f, proxy->host_features_sel);
425 qemu_put_be32(f, proxy->guest_features_sel);
426 qemu_put_be32(f, proxy->guest_page_shift);
429 static void virtio_mmio_reset(DeviceState *d)
431 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
433 virtio_mmio_stop_ioeventfd(proxy);
434 virtio_bus_reset(&proxy->bus);
435 proxy->host_features_sel = 0;
436 proxy->guest_features_sel = 0;
437 proxy->guest_page_shift = 0;
440 static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
441 bool with_irqfd)
443 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
444 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
445 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
446 VirtQueue *vq = virtio_get_queue(vdev, n);
447 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
449 if (assign) {
450 int r = event_notifier_init(notifier, 0);
451 if (r < 0) {
452 return r;
454 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
455 } else {
456 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
457 event_notifier_cleanup(notifier);
460 if (vdc->guest_notifier_mask) {
461 vdc->guest_notifier_mask(vdev, n, !assign);
464 return 0;
467 static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
468 bool assign)
470 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
471 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
472 /* TODO: need to check if kvm-arm supports irqfd */
473 bool with_irqfd = false;
474 int r, n;
476 nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);
478 for (n = 0; n < nvqs; n++) {
479 if (!virtio_queue_get_num(vdev, n)) {
480 break;
483 r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
484 if (r < 0) {
485 goto assign_error;
489 return 0;
491 assign_error:
492 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
493 assert(assign);
494 while (--n >= 0) {
495 virtio_mmio_set_guest_notifier(d, n, !assign, false);
497 return r;
500 static int virtio_mmio_set_host_notifier(DeviceState *opaque, int n,
501 bool assign)
503 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
505 /* Stop using ioeventfd for virtqueue kick if the device starts using host
506 * notifiers. This makes it easy to avoid stepping on each others' toes.
508 proxy->ioeventfd_disabled = assign;
509 if (assign) {
510 virtio_mmio_stop_ioeventfd(proxy);
512 /* We don't need to start here: it's not needed because backend
513 * currently only stops on status change away from ok,
514 * reset, vmstop and such. If we do add code to start here,
515 * need to check vmstate, device state etc. */
516 return virtio_mmio_set_host_notifier_internal(proxy, n, assign, false);
519 /* virtio-mmio device */
521 static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
523 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
524 SysBusDevice *sbd = SYS_BUS_DEVICE(d);
526 qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
527 d, NULL);
528 sysbus_init_irq(sbd, &proxy->irq);
529 memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy,
530 TYPE_VIRTIO_MMIO, 0x200);
531 sysbus_init_mmio(sbd, &proxy->iomem);
534 static void virtio_mmio_class_init(ObjectClass *klass, void *data)
536 DeviceClass *dc = DEVICE_CLASS(klass);
538 dc->realize = virtio_mmio_realizefn;
539 dc->reset = virtio_mmio_reset;
540 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
543 static const TypeInfo virtio_mmio_info = {
544 .name = TYPE_VIRTIO_MMIO,
545 .parent = TYPE_SYS_BUS_DEVICE,
546 .instance_size = sizeof(VirtIOMMIOProxy),
547 .class_init = virtio_mmio_class_init,
550 /* virtio-mmio-bus. */
552 static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
554 BusClass *bus_class = BUS_CLASS(klass);
555 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
557 k->notify = virtio_mmio_update_irq;
558 k->save_config = virtio_mmio_save_config;
559 k->load_config = virtio_mmio_load_config;
560 k->set_host_notifier = virtio_mmio_set_host_notifier;
561 k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
562 k->has_variable_vring_alignment = true;
563 bus_class->max_dev = 1;
566 static const TypeInfo virtio_mmio_bus_info = {
567 .name = TYPE_VIRTIO_MMIO_BUS,
568 .parent = TYPE_VIRTIO_BUS,
569 .instance_size = sizeof(VirtioBusState),
570 .class_init = virtio_mmio_bus_class_init,
573 static void virtio_mmio_register_types(void)
575 type_register_static(&virtio_mmio_bus_info);
576 type_register_static(&virtio_mmio_info);
579 type_init(virtio_mmio_register_types)