iotests/223: Create socket in $SOCK_DIR
[qemu/ar7.git] / hw / virtio / virtio-mmio.c
blob3d5ca0f667f86d8074380347967558a14afc8162
1 /*
2 * Virtio MMIO bindings
4 * Copyright (c) 2011 Linaro Limited
6 * Author:
7 * Peter Maydell <peter.maydell@linaro.org>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License; either version 2
11 * of the License, or (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
23 #include "standard-headers/linux/virtio_mmio.h"
24 #include "hw/irq.h"
25 #include "hw/qdev-properties.h"
26 #include "hw/sysbus.h"
27 #include "hw/virtio/virtio.h"
28 #include "migration/qemu-file-types.h"
29 #include "qemu/host-utils.h"
30 #include "qemu/module.h"
31 #include "sysemu/kvm.h"
32 #include "hw/virtio/virtio-bus.h"
33 #include "qemu/error-report.h"
34 #include "qemu/log.h"
35 #include "trace.h"
/* QOM macros */
/* virtio-mmio-bus */
#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
#define VIRTIO_MMIO_BUS(obj) \
        OBJECT_CHECK(VirtioBusState, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_GET_CLASS(obj) \
        OBJECT_GET_CLASS(VirtioBusClass, (obj), TYPE_VIRTIO_MMIO_BUS)
#define VIRTIO_MMIO_BUS_CLASS(klass) \
        OBJECT_CLASS_CHECK(VirtioBusClass, (klass), TYPE_VIRTIO_MMIO_BUS)

/* virtio-mmio */
#define TYPE_VIRTIO_MMIO "virtio-mmio"
#define VIRTIO_MMIO(obj) \
        OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)

/* Constants returned by the read-only identification registers
 * (MAGIC_VALUE, VERSION, VENDOR_ID) in virtio_mmio_read(). */
#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 2        /* non-legacy (virtio 1.0) transport */
#define VIRT_VERSION_LEGACY 1 /* legacy (pre-1.0) transport */
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
/*
 * Per-virtqueue shadow state used only by non-legacy (v2) devices:
 * the guest programs these registers piecewise and they are applied
 * to the VirtIODevice when QUEUE_READY is written (see
 * virtio_mmio_write()); they also need migration (see
 * vmstate_virtio_mmio_queue_state).
 */
typedef struct VirtIOMMIOQueue {
    uint16_t num;       /* queue size, written via VIRTIO_MMIO_QUEUE_NUM */
    bool enabled;       /* set/cleared by writes to VIRTIO_MMIO_QUEUE_READY */
    uint32_t desc[2];   /* descriptor table address: [0] = low, [1] = high */
    uint32_t avail[2];  /* available ring address:   [0] = low, [1] = high */
    uint32_t used[2];   /* used ring address:        [0] = low, [1] = high */
} VirtIOMMIOQueue;
/* State of one virtio-mmio transport (proxy) device. */
typedef struct {
    /* Generic */
    SysBusDevice parent_obj;
    MemoryRegion iomem;  /* the 0x200-byte register window */
    qemu_irq irq;
    bool legacy;         /* true: virtio legacy (v1) register layout */
    /* Guest accessible state needing migration and reset */
    uint32_t host_features_sel;   /* DEVICE_FEATURES_SEL: 0 or 1 */
    uint32_t guest_features_sel;  /* DRIVER_FEATURES_SEL: 0 or 1 */
    uint32_t guest_page_shift;    /* log2 of GUEST_PAGE_SIZE (legacy only) */
    /* virtio-bus */
    VirtioBusState bus;
    bool format_transport_address; /* append MMIO base to dev path if set */
    /* Fields only used for non-legacy (v2) devices */
    uint32_t guest_features[2];   /* [0] = low 32 bits, [1] = high 32 bits */
    VirtIOMMIOQueue vqs[VIRTIO_QUEUE_MAX];
} VirtIOMMIOProxy;
83 static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
85 return kvm_eventfds_enabled();
88 static int virtio_mmio_ioeventfd_assign(DeviceState *d,
89 EventNotifier *notifier,
90 int n, bool assign)
92 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
94 if (assign) {
95 memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
96 true, n, notifier);
97 } else {
98 memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
99 true, n, notifier);
101 return 0;
104 static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
106 virtio_bus_start_ioeventfd(&proxy->bus);
109 static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
111 virtio_bus_stop_ioeventfd(&proxy->bus);
114 static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
116 VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
117 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
119 trace_virtio_mmio_read(offset);
121 if (!vdev) {
122 /* If no backend is present, we treat most registers as
123 * read-as-zero, except for the magic number, version and
124 * vendor ID. This is not strictly sanctioned by the virtio
125 * spec, but it allows us to provide transports with no backend
126 * plugged in which don't confuse Linux's virtio code: the
127 * probe won't complain about the bad magic number, but the
128 * device ID of zero means no backend will claim it.
130 switch (offset) {
131 case VIRTIO_MMIO_MAGIC_VALUE:
132 return VIRT_MAGIC;
133 case VIRTIO_MMIO_VERSION:
134 if (proxy->legacy) {
135 return VIRT_VERSION_LEGACY;
136 } else {
137 return VIRT_VERSION;
139 case VIRTIO_MMIO_VENDOR_ID:
140 return VIRT_VENDOR;
141 default:
142 return 0;
146 if (offset >= VIRTIO_MMIO_CONFIG) {
147 offset -= VIRTIO_MMIO_CONFIG;
148 switch (size) {
149 case 1:
150 return virtio_config_readb(vdev, offset);
151 case 2:
152 return virtio_config_readw(vdev, offset);
153 case 4:
154 return virtio_config_readl(vdev, offset);
155 default:
156 abort();
159 if (size != 4) {
160 qemu_log_mask(LOG_GUEST_ERROR,
161 "%s: wrong size access to register!\n",
162 __func__);
163 return 0;
165 switch (offset) {
166 case VIRTIO_MMIO_MAGIC_VALUE:
167 return VIRT_MAGIC;
168 case VIRTIO_MMIO_VERSION:
169 if (proxy->legacy) {
170 return VIRT_VERSION_LEGACY;
171 } else {
172 return VIRT_VERSION;
174 case VIRTIO_MMIO_DEVICE_ID:
175 return vdev->device_id;
176 case VIRTIO_MMIO_VENDOR_ID:
177 return VIRT_VENDOR;
178 case VIRTIO_MMIO_DEVICE_FEATURES:
179 if (proxy->legacy) {
180 if (proxy->host_features_sel) {
181 return 0;
182 } else {
183 return vdev->host_features;
185 } else {
186 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
187 return (vdev->host_features & ~vdc->legacy_features)
188 >> (32 * proxy->host_features_sel);
190 case VIRTIO_MMIO_QUEUE_NUM_MAX:
191 if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
192 return 0;
194 return VIRTQUEUE_MAX_SIZE;
195 case VIRTIO_MMIO_QUEUE_PFN:
196 if (!proxy->legacy) {
197 qemu_log_mask(LOG_GUEST_ERROR,
198 "%s: read from legacy register (0x%"
199 HWADDR_PRIx ") in non-legacy mode\n",
200 __func__, offset);
201 return 0;
203 return virtio_queue_get_addr(vdev, vdev->queue_sel)
204 >> proxy->guest_page_shift;
205 case VIRTIO_MMIO_QUEUE_READY:
206 if (proxy->legacy) {
207 qemu_log_mask(LOG_GUEST_ERROR,
208 "%s: read from non-legacy register (0x%"
209 HWADDR_PRIx ") in legacy mode\n",
210 __func__, offset);
211 return 0;
213 return proxy->vqs[vdev->queue_sel].enabled;
214 case VIRTIO_MMIO_INTERRUPT_STATUS:
215 return atomic_read(&vdev->isr);
216 case VIRTIO_MMIO_STATUS:
217 return vdev->status;
218 case VIRTIO_MMIO_CONFIG_GENERATION:
219 if (proxy->legacy) {
220 qemu_log_mask(LOG_GUEST_ERROR,
221 "%s: read from non-legacy register (0x%"
222 HWADDR_PRIx ") in legacy mode\n",
223 __func__, offset);
224 return 0;
226 return vdev->generation;
227 case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
228 case VIRTIO_MMIO_DRIVER_FEATURES:
229 case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
230 case VIRTIO_MMIO_GUEST_PAGE_SIZE:
231 case VIRTIO_MMIO_QUEUE_SEL:
232 case VIRTIO_MMIO_QUEUE_NUM:
233 case VIRTIO_MMIO_QUEUE_ALIGN:
234 case VIRTIO_MMIO_QUEUE_NOTIFY:
235 case VIRTIO_MMIO_INTERRUPT_ACK:
236 case VIRTIO_MMIO_QUEUE_DESC_LOW:
237 case VIRTIO_MMIO_QUEUE_DESC_HIGH:
238 case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
239 case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
240 case VIRTIO_MMIO_QUEUE_USED_LOW:
241 case VIRTIO_MMIO_QUEUE_USED_HIGH:
242 qemu_log_mask(LOG_GUEST_ERROR,
243 "%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
244 __func__, offset);
245 return 0;
246 default:
247 qemu_log_mask(LOG_GUEST_ERROR,
248 "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
249 __func__, offset);
250 return 0;
252 return 0;
255 static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
256 unsigned size)
258 VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
259 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
261 trace_virtio_mmio_write_offset(offset, value);
263 if (!vdev) {
264 /* If no backend is present, we just make all registers
265 * write-ignored. This allows us to provide transports with
266 * no backend plugged in.
268 return;
271 if (offset >= VIRTIO_MMIO_CONFIG) {
272 offset -= VIRTIO_MMIO_CONFIG;
273 switch (size) {
274 case 1:
275 virtio_config_writeb(vdev, offset, value);
276 break;
277 case 2:
278 virtio_config_writew(vdev, offset, value);
279 break;
280 case 4:
281 virtio_config_writel(vdev, offset, value);
282 break;
283 default:
284 abort();
286 return;
288 if (size != 4) {
289 qemu_log_mask(LOG_GUEST_ERROR,
290 "%s: wrong size access to register!\n",
291 __func__);
292 return;
294 switch (offset) {
295 case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
296 if (value) {
297 proxy->host_features_sel = 1;
298 } else {
299 proxy->host_features_sel = 0;
301 break;
302 case VIRTIO_MMIO_DRIVER_FEATURES:
303 if (proxy->legacy) {
304 if (proxy->guest_features_sel) {
305 qemu_log_mask(LOG_GUEST_ERROR,
306 "%s: attempt to write guest features with "
307 "guest_features_sel > 0 in legacy mode\n",
308 __func__);
309 } else {
310 virtio_set_features(vdev, value);
312 } else {
313 proxy->guest_features[proxy->guest_features_sel] = value;
315 break;
316 case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
317 if (value) {
318 proxy->guest_features_sel = 1;
319 } else {
320 proxy->guest_features_sel = 0;
322 break;
323 case VIRTIO_MMIO_GUEST_PAGE_SIZE:
324 if (!proxy->legacy) {
325 qemu_log_mask(LOG_GUEST_ERROR,
326 "%s: write to legacy register (0x%"
327 HWADDR_PRIx ") in non-legacy mode\n",
328 __func__, offset);
329 return;
331 proxy->guest_page_shift = ctz32(value);
332 if (proxy->guest_page_shift > 31) {
333 proxy->guest_page_shift = 0;
335 trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
336 break;
337 case VIRTIO_MMIO_QUEUE_SEL:
338 if (value < VIRTIO_QUEUE_MAX) {
339 vdev->queue_sel = value;
341 break;
342 case VIRTIO_MMIO_QUEUE_NUM:
343 trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
344 if (proxy->legacy) {
345 virtio_queue_set_num(vdev, vdev->queue_sel, value);
346 virtio_queue_update_rings(vdev, vdev->queue_sel);
347 } else {
348 proxy->vqs[vdev->queue_sel].num = value;
350 break;
351 case VIRTIO_MMIO_QUEUE_ALIGN:
352 if (!proxy->legacy) {
353 qemu_log_mask(LOG_GUEST_ERROR,
354 "%s: write to legacy register (0x%"
355 HWADDR_PRIx ") in non-legacy mode\n",
356 __func__, offset);
357 return;
359 virtio_queue_set_align(vdev, vdev->queue_sel, value);
360 break;
361 case VIRTIO_MMIO_QUEUE_PFN:
362 if (!proxy->legacy) {
363 qemu_log_mask(LOG_GUEST_ERROR,
364 "%s: write to legacy register (0x%"
365 HWADDR_PRIx ") in non-legacy mode\n",
366 __func__, offset);
367 return;
369 if (value == 0) {
370 virtio_reset(vdev);
371 } else {
372 virtio_queue_set_addr(vdev, vdev->queue_sel,
373 value << proxy->guest_page_shift);
375 break;
376 case VIRTIO_MMIO_QUEUE_READY:
377 if (proxy->legacy) {
378 qemu_log_mask(LOG_GUEST_ERROR,
379 "%s: write to non-legacy register (0x%"
380 HWADDR_PRIx ") in legacy mode\n",
381 __func__, offset);
382 return;
384 if (value) {
385 virtio_queue_set_num(vdev, vdev->queue_sel,
386 proxy->vqs[vdev->queue_sel].num);
387 virtio_queue_set_rings(vdev, vdev->queue_sel,
388 ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
389 proxy->vqs[vdev->queue_sel].desc[0],
390 ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
391 proxy->vqs[vdev->queue_sel].avail[0],
392 ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
393 proxy->vqs[vdev->queue_sel].used[0]);
394 proxy->vqs[vdev->queue_sel].enabled = 1;
395 } else {
396 proxy->vqs[vdev->queue_sel].enabled = 0;
398 break;
399 case VIRTIO_MMIO_QUEUE_NOTIFY:
400 if (value < VIRTIO_QUEUE_MAX) {
401 virtio_queue_notify(vdev, value);
403 break;
404 case VIRTIO_MMIO_INTERRUPT_ACK:
405 atomic_and(&vdev->isr, ~value);
406 virtio_update_irq(vdev);
407 break;
408 case VIRTIO_MMIO_STATUS:
409 if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
410 virtio_mmio_stop_ioeventfd(proxy);
413 if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
414 virtio_set_features(vdev,
415 ((uint64_t)proxy->guest_features[1]) << 32 |
416 proxy->guest_features[0]);
419 virtio_set_status(vdev, value & 0xff);
421 if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
422 virtio_mmio_start_ioeventfd(proxy);
425 if (vdev->status == 0) {
426 virtio_reset(vdev);
428 break;
429 case VIRTIO_MMIO_QUEUE_DESC_LOW:
430 if (proxy->legacy) {
431 qemu_log_mask(LOG_GUEST_ERROR,
432 "%s: write to non-legacy register (0x%"
433 HWADDR_PRIx ") in legacy mode\n",
434 __func__, offset);
435 return;
437 proxy->vqs[vdev->queue_sel].desc[0] = value;
438 break;
439 case VIRTIO_MMIO_QUEUE_DESC_HIGH:
440 if (proxy->legacy) {
441 qemu_log_mask(LOG_GUEST_ERROR,
442 "%s: write to non-legacy register (0x%"
443 HWADDR_PRIx ") in legacy mode\n",
444 __func__, offset);
445 return;
447 proxy->vqs[vdev->queue_sel].desc[1] = value;
448 break;
449 case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
450 if (proxy->legacy) {
451 qemu_log_mask(LOG_GUEST_ERROR,
452 "%s: write to non-legacy register (0x%"
453 HWADDR_PRIx ") in legacy mode\n",
454 __func__, offset);
455 return;
457 proxy->vqs[vdev->queue_sel].avail[0] = value;
458 break;
459 case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
460 if (proxy->legacy) {
461 qemu_log_mask(LOG_GUEST_ERROR,
462 "%s: write to non-legacy register (0x%"
463 HWADDR_PRIx ") in legacy mode\n",
464 __func__, offset);
465 return;
467 proxy->vqs[vdev->queue_sel].avail[1] = value;
468 break;
469 case VIRTIO_MMIO_QUEUE_USED_LOW:
470 if (proxy->legacy) {
471 qemu_log_mask(LOG_GUEST_ERROR,
472 "%s: write to non-legacy register (0x%"
473 HWADDR_PRIx ") in legacy mode\n",
474 __func__, offset);
475 return;
477 proxy->vqs[vdev->queue_sel].used[0] = value;
478 break;
479 case VIRTIO_MMIO_QUEUE_USED_HIGH:
480 if (proxy->legacy) {
481 qemu_log_mask(LOG_GUEST_ERROR,
482 "%s: write to non-legacy register (0x%"
483 HWADDR_PRIx ") in legacy mode\n",
484 __func__, offset);
485 return;
487 proxy->vqs[vdev->queue_sel].used[1] = value;
488 break;
489 case VIRTIO_MMIO_MAGIC_VALUE:
490 case VIRTIO_MMIO_VERSION:
491 case VIRTIO_MMIO_DEVICE_ID:
492 case VIRTIO_MMIO_VENDOR_ID:
493 case VIRTIO_MMIO_DEVICE_FEATURES:
494 case VIRTIO_MMIO_QUEUE_NUM_MAX:
495 case VIRTIO_MMIO_INTERRUPT_STATUS:
496 case VIRTIO_MMIO_CONFIG_GENERATION:
497 qemu_log_mask(LOG_GUEST_ERROR,
498 "%s: write to read-only register (0x%" HWADDR_PRIx ")\n",
499 __func__, offset);
500 break;
502 default:
503 qemu_log_mask(LOG_GUEST_ERROR,
504 "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
505 __func__, offset);
509 static const MemoryRegionOps virtio_legacy_mem_ops = {
510 .read = virtio_mmio_read,
511 .write = virtio_mmio_write,
512 .endianness = DEVICE_NATIVE_ENDIAN,
515 static const MemoryRegionOps virtio_mem_ops = {
516 .read = virtio_mmio_read,
517 .write = virtio_mmio_write,
518 .endianness = DEVICE_LITTLE_ENDIAN,
521 static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
523 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
524 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
525 int level;
527 if (!vdev) {
528 return;
530 level = (atomic_read(&vdev->isr) != 0);
531 trace_virtio_mmio_setting_irq(level);
532 qemu_set_irq(proxy->irq, level);
535 static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
537 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
539 proxy->host_features_sel = qemu_get_be32(f);
540 proxy->guest_features_sel = qemu_get_be32(f);
541 proxy->guest_page_shift = qemu_get_be32(f);
542 return 0;
545 static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
547 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
549 qemu_put_be32(f, proxy->host_features_sel);
550 qemu_put_be32(f, proxy->guest_features_sel);
551 qemu_put_be32(f, proxy->guest_page_shift);
554 static const VMStateDescription vmstate_virtio_mmio_queue_state = {
555 .name = "virtio_mmio/queue_state",
556 .version_id = 1,
557 .minimum_version_id = 1,
558 .fields = (VMStateField[]) {
559 VMSTATE_UINT16(num, VirtIOMMIOQueue),
560 VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
561 VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
562 VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
563 VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
564 VMSTATE_END_OF_LIST()
568 static const VMStateDescription vmstate_virtio_mmio_state_sub = {
569 .name = "virtio_mmio/state",
570 .version_id = 1,
571 .minimum_version_id = 1,
572 .fields = (VMStateField[]) {
573 VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
574 VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
575 vmstate_virtio_mmio_queue_state,
576 VirtIOMMIOQueue),
577 VMSTATE_END_OF_LIST()
581 static const VMStateDescription vmstate_virtio_mmio = {
582 .name = "virtio_mmio",
583 .version_id = 1,
584 .minimum_version_id = 1,
585 .minimum_version_id_old = 1,
586 .fields = (VMStateField[]) {
587 VMSTATE_END_OF_LIST()
589 .subsections = (const VMStateDescription * []) {
590 &vmstate_virtio_mmio_state_sub,
591 NULL
595 static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f)
597 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
599 vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL);
602 static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f)
604 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
606 return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1);
609 static bool virtio_mmio_has_extra_state(DeviceState *opaque)
611 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
613 return !proxy->legacy;
616 static void virtio_mmio_reset(DeviceState *d)
618 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
619 int i;
621 virtio_mmio_stop_ioeventfd(proxy);
622 virtio_bus_reset(&proxy->bus);
623 proxy->host_features_sel = 0;
624 proxy->guest_features_sel = 0;
625 proxy->guest_page_shift = 0;
627 if (!proxy->legacy) {
628 proxy->guest_features[0] = proxy->guest_features[1] = 0;
630 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
631 proxy->vqs[i].enabled = 0;
632 proxy->vqs[i].num = 0;
633 proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
634 proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
635 proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
640 static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
641 bool with_irqfd)
643 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
644 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
645 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
646 VirtQueue *vq = virtio_get_queue(vdev, n);
647 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
649 if (assign) {
650 int r = event_notifier_init(notifier, 0);
651 if (r < 0) {
652 return r;
654 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
655 } else {
656 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
657 event_notifier_cleanup(notifier);
660 if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
661 vdc->guest_notifier_mask(vdev, n, !assign);
664 return 0;
667 static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
668 bool assign)
670 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
671 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
672 /* TODO: need to check if kvm-arm supports irqfd */
673 bool with_irqfd = false;
674 int r, n;
676 nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);
678 for (n = 0; n < nvqs; n++) {
679 if (!virtio_queue_get_num(vdev, n)) {
680 break;
683 r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
684 if (r < 0) {
685 goto assign_error;
689 return 0;
691 assign_error:
692 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
693 assert(assign);
694 while (--n >= 0) {
695 virtio_mmio_set_guest_notifier(d, n, !assign, false);
697 return r;
700 static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
702 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
703 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
705 if (!proxy->legacy) {
706 virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
710 /* virtio-mmio device */
712 static Property virtio_mmio_properties[] = {
713 DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
714 format_transport_address, true),
715 DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
716 DEFINE_PROP_END_OF_LIST(),
719 static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
721 VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
722 SysBusDevice *sbd = SYS_BUS_DEVICE(d);
724 qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
725 d, NULL);
726 sysbus_init_irq(sbd, &proxy->irq);
727 if (proxy->legacy) {
728 memory_region_init_io(&proxy->iomem, OBJECT(d),
729 &virtio_legacy_mem_ops, proxy,
730 TYPE_VIRTIO_MMIO, 0x200);
731 } else {
732 memory_region_init_io(&proxy->iomem, OBJECT(d),
733 &virtio_mem_ops, proxy,
734 TYPE_VIRTIO_MMIO, 0x200);
736 sysbus_init_mmio(sbd, &proxy->iomem);
739 static void virtio_mmio_class_init(ObjectClass *klass, void *data)
741 DeviceClass *dc = DEVICE_CLASS(klass);
743 dc->realize = virtio_mmio_realizefn;
744 dc->reset = virtio_mmio_reset;
745 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
746 dc->props = virtio_mmio_properties;
749 static const TypeInfo virtio_mmio_info = {
750 .name = TYPE_VIRTIO_MMIO,
751 .parent = TYPE_SYS_BUS_DEVICE,
752 .instance_size = sizeof(VirtIOMMIOProxy),
753 .class_init = virtio_mmio_class_init,
756 /* virtio-mmio-bus. */
758 static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
760 BusState *virtio_mmio_bus;
761 VirtIOMMIOProxy *virtio_mmio_proxy;
762 char *proxy_path;
763 SysBusDevice *proxy_sbd;
764 char *path;
766 virtio_mmio_bus = qdev_get_parent_bus(dev);
767 virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
768 proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));
771 * If @format_transport_address is false, then we just perform the same as
772 * virtio_bus_get_dev_path(): we delegate the address formatting for the
773 * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
774 * (i.e., the device that implements the virtio-mmio bus) resides on. In
775 * this case the base address of the virtio-mmio transport will be
776 * invisible.
778 if (!virtio_mmio_proxy->format_transport_address) {
779 return proxy_path;
782 /* Otherwise, we append the base address of the transport. */
783 proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy);
784 assert(proxy_sbd->num_mmio == 1);
785 assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem);
787 if (proxy_path) {
788 path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
789 proxy_sbd->mmio[0].addr);
790 } else {
791 path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
792 proxy_sbd->mmio[0].addr);
794 g_free(proxy_path);
795 return path;
798 static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
800 BusClass *bus_class = BUS_CLASS(klass);
801 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
803 k->notify = virtio_mmio_update_irq;
804 k->save_config = virtio_mmio_save_config;
805 k->load_config = virtio_mmio_load_config;
806 k->save_extra_state = virtio_mmio_save_extra_state;
807 k->load_extra_state = virtio_mmio_load_extra_state;
808 k->has_extra_state = virtio_mmio_has_extra_state;
809 k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
810 k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
811 k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
812 k->pre_plugged = virtio_mmio_pre_plugged;
813 k->has_variable_vring_alignment = true;
814 bus_class->max_dev = 1;
815 bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
818 static const TypeInfo virtio_mmio_bus_info = {
819 .name = TYPE_VIRTIO_MMIO_BUS,
820 .parent = TYPE_VIRTIO_BUS,
821 .instance_size = sizeof(VirtioBusState),
822 .class_init = virtio_mmio_bus_class_init,
825 static void virtio_mmio_register_types(void)
827 type_register_static(&virtio_mmio_bus_info);
828 type_register_static(&virtio_mmio_info);
831 type_init(virtio_mmio_register_types)