hw/virtio/virtio-mmio.c
/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-mmio.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "trace.h"
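
/*
 * ioeventfd support: when enabled, a guest write of a queue index to
 * VIRTIO_MMIO_QUEUE_NOTIFY is matched in the memory core (match_data on
 * the queue number) and signals an EventNotifier directly, bypassing the
 * MMIO write handler below.
 */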
static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    return (proxy->flags & VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD) != 0;
}

static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}

static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
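
/*
 * Reset only the transport-local queue state (the QUEUE_READY latches).
 * Legacy transports have no QUEUE_READY register, so there is nothing
 * to do in that mode.
 */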
static void virtio_mmio_soft_reset(VirtIOMMIOProxy *proxy)
{
    int i;

    if (proxy->legacy) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
    }
}
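
/*
 * Guest read of a transport register. Accesses at or above
 * VIRTIO_MMIO_CONFIG fall through to the backend's config space (using
 * legacy or virtio-1 accessors as appropriate); everything below that
 * offset is a 32-bit control register decoded by the switch.
 */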
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            if (proxy->legacy) {
                return VIRT_VERSION_LEGACY;
            } else {
                return VIRT_VERSION;
            }
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        if (proxy->legacy) {
            switch (size) {
            case 1:
                return virtio_config_readb(vdev, offset);
            case 2:
                return virtio_config_readw(vdev, offset);
            case 4:
                return virtio_config_readl(vdev, offset);
            default:
                abort();
            }
        } else {
            switch (size) {
            case 1:
                return virtio_config_modern_readb(vdev, offset);
            case 2:
                return virtio_config_modern_readw(vdev, offset);
            case 4:
                return virtio_config_modern_readl(vdev, offset);
            default:
                abort();
            }
        }
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        if (proxy->legacy) {
            return VIRT_VERSION_LEGACY;
        } else {
            return VIRT_VERSION;
        }
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        if (proxy->legacy) {
            if (proxy->host_features_sel) {
                return 0;
            } else {
                return vdev->host_features;
            }
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
            return (vdev->host_features & ~vdc->legacy_features)
                >> (32 * proxy->host_features_sel);
        }
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return proxy->vqs[vdev->queue_sel].enabled;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return qatomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_CONFIG_GENERATION:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return vdev->generation;
    case VIRTIO_MMIO_SHM_LEN_LOW:
    case VIRTIO_MMIO_SHM_LEN_HIGH:
        /*
         * VIRTIO_MMIO_SHM_SEL is unimplemented; according to the Linux
         * driver, a region length of -1 means the shared memory region
         * doesn't exist.
         */
        return -1;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
    case VIRTIO_MMIO_QUEUE_USED_LOW:
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    }
    return 0;
}
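
/*
 * Guest write to a transport register. Mirrors virtio_mmio_read():
 * config-space writes are forwarded to the backend; control registers
 * must be 32-bit accesses and drive queue setup, feature negotiation
 * and the device status machine.
 */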
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        if (proxy->legacy) {
            switch (size) {
            case 1:
                virtio_config_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        } else {
            switch (size) {
            case 1:
                virtio_config_modern_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_modern_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_modern_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        }
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        if (value) {
            proxy->host_features_sel = 1;
        } else {
            proxy->host_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        if (proxy->legacy) {
            if (proxy->guest_features_sel) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: attempt to write guest features with "
                              "guest_features_sel > 0 in legacy mode\n",
                              __func__);
            } else {
                virtio_set_features(vdev, value);
            }
        } else {
            proxy->guest_features[proxy->guest_features_sel] = value;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        if (value) {
            proxy->guest_features_sel = 1;
        } else {
            proxy->guest_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);

        if (proxy->legacy) {
            virtio_queue_update_rings(vdev, vdev->queue_sel);
        } else {
            proxy->vqs[vdev->queue_sel].num = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        qatomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
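    /*
     * Status writes are ordered deliberately: ioeventfd is stopped before
     * a status update that drops DRIVER_OK, the latched feature words are
     * pushed into the device when FEATURES_OK is written (non-legacy), and
     * a resulting status of 0 performs a full device and transport reset.
     */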
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
            virtio_set_features(vdev,
                                ((uint64_t)proxy->guest_features[1]) << 32 |
                                proxy->guest_features[0]);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
            virtio_mmio_soft_reset(proxy);
        }
        break;
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[1] = value;
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
    case VIRTIO_MMIO_CONFIG_GENERATION:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to read-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
    }
}
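
/*
 * Legacy (virtio 0.9.x) devices use the guest's natural endianness for
 * register accesses, while virtio 1.0 mandates little-endian; hence the
 * two separate MemoryRegionOps.
 */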
static const MemoryRegionOps virtio_legacy_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (qatomic_read(&vdev->isr) != 0);
    trace_virtio_mmio_setting_irq(level);
    qemu_set_irq(proxy->irq, level);
}
static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}
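
/*
 * Extra (non-legacy) transport state for migration: the latched driver
 * feature words and the per-queue ring addresses, carried in a vmstate
 * subsection. Only transports in modern mode have this state to migrate
 * (see virtio_mmio_has_extra_state() below).
 */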
static const VMStateDescription vmstate_virtio_mmio_queue_state = {
    .name = "virtio_mmio/queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOMMIOQueue),
        VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio_state_sub = {
    .name = "virtio_mmio/state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_mmio_queue_state,
                             VirtIOMMIOQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio = {
    .name = "virtio_mmio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_virtio_mmio_state_sub,
        NULL
    }
};
static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL);
}

static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1);
}

static bool virtio_mmio_has_extra_state(DeviceState *opaque)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return !proxy->legacy;
}
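
/*
 * Full transport reset (e.g. on system reset), as opposed to the
 * guest-initiated virtio_mmio_soft_reset() above: this also resets the
 * backend via the bus and clears the feature/page-size selectors and,
 * in non-legacy mode, all latched queue state.
 */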
static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    int i;

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;

    if (!proxy->legacy) {
        proxy->guest_features[0] = proxy->guest_features[1] = 0;

        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].enabled = 0;
            proxy->vqs[i].num = 0;
            proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
            proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
            proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
        }
    }
}
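
/*
 * Guest notifier plumbing: each virtqueue's guest notifier is routed to
 * the transport interrupt. irqfd bypass is not used here (with_irqfd is
 * always false below), so guest notifications always pass through QEMU.
 */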
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}
static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->legacy) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}
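
/*
 * A board model typically instantiates the transport along these lines
 * (illustrative sketch, not code from this file; "base" and "irq" stand
 * in for board-specific resources):
 *
 *     DeviceState *dev = qdev_new(TYPE_VIRTIO_MMIO);
 *     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
 *     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
 *     sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
 *
 * and then plugs a backend such as virtio-blk-device onto the proxy's
 * virtio-mmio bus.
 */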
/* virtio-mmio device */

static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
    DEFINE_PROP_BIT("ioeventfd", VirtIOMMIOProxy, flags,
                    VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);

    if (!kvm_eventfds_enabled()) {
        proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
    }

    if (proxy->legacy) {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_legacy_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    } else {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    }
    sysbus_init_mmio(sbd, &proxy->iomem);
}
static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, virtio_mmio_properties);
}

static const TypeInfo virtio_mmio_info = {
    .name = TYPE_VIRTIO_MMIO,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init = virtio_mmio_class_init,
};
/* virtio-mmio-bus. */

static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    char *path;
    MemoryRegionSection section;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    section = memory_region_find(&virtio_mmio_proxy->iomem, 0, 0x200);
    assert(section.mr);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               section.offset_within_address_space);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               section.offset_within_address_space);
    }
    memory_region_unref(section.mr);

    g_free(proxy_path);
    return path;
}
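
/*
 * The resulting path then ends in "virtio-mmio@<base-address>", e.g.
 * something like ".../virtio-mmio@a003e00" (address purely illustrative).
 * Making the base address visible keeps multiple transports
 * distinguishable in the device paths, e.g. when identifying devices
 * during migration.
 */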
static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->save_extra_state = virtio_mmio_save_extra_state;
    k->load_extra_state = virtio_mmio_load_extra_state;
    k->has_extra_state = virtio_mmio_has_extra_state;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->pre_plugged = virtio_mmio_pre_plugged;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}
static const TypeInfo virtio_mmio_bus_info = {
    .name = TYPE_VIRTIO_MMIO_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)