/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-mmio.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "trace.h"
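
/*
 * With KVM ioeventfd support, guest writes to QUEUE_NOTIFY can be
 * routed to an eventfd in the kernel instead of returning to this
 * MMIO emulation, so ioeventfd is only offered when KVM has eventfds.
 */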
static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    return kvm_eventfds_enabled();
}

static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}

static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
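
/*
 * "Soft reset" for the non-legacy transport: clear only the per-queue
 * QUEUE_READY state; the device itself is reset separately via
 * virtio_reset() when the guest writes 0 to STATUS.
 */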
static void virtio_mmio_soft_reset(VirtIOMMIOProxy *proxy)
{
    int i;

    if (proxy->legacy) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
    }
}

static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            if (proxy->legacy) {
                return VIRT_VERSION_LEGACY;
            } else {
                return VIRT_VERSION;
            }
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            return virtio_config_readb(vdev, offset);
        case 2:
            return virtio_config_readw(vdev, offset);
        case 4:
            return virtio_config_readl(vdev, offset);
        default:
            abort();
        }
    }

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }

    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        if (proxy->legacy) {
            return VIRT_VERSION_LEGACY;
        } else {
            return VIRT_VERSION;
        }
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
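    /*
     * The 64-bit host feature set is exposed 32 bits at a time, selected
     * by DEVICE_FEATURES_SEL.  Legacy only has bank 0; the modern
     * transport additionally hides legacy-only bits (vdc->legacy_features).
     */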
    case VIRTIO_MMIO_DEVICE_FEATURES:
        if (proxy->legacy) {
            if (proxy->host_features_sel) {
                return 0;
            } else {
                return vdev->host_features;
            }
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
            return (vdev->host_features & ~vdc->legacy_features)
                >> (32 * proxy->host_features_sel);
        }
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
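    /*
     * Legacy guests address the ring by page frame number, scaled by the
     * page size they declared via GUEST_PAGE_SIZE.
     */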
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return proxy->vqs[vdev->queue_sel].enabled;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return atomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_CONFIG_GENERATION:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return vdev->generation;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
    case VIRTIO_MMIO_QUEUE_USED_LOW:
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    }
    return 0;
}

static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        switch (size) {
        case 1:
            virtio_config_writeb(vdev, offset, value);
            break;
        case 2:
            virtio_config_writew(vdev, offset, value);
            break;
        case 4:
            virtio_config_writel(vdev, offset, value);
            break;
        default:
            abort();
        }
        return;
    }

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }

    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        if (value) {
            proxy->host_features_sel = 1;
        } else {
            proxy->host_features_sel = 0;
        }
        break;
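    /*
     * In non-legacy mode, driver features are only latched into the
     * selected bank here; they take effect when the guest sets
     * FEATURES_OK in STATUS below.
     */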
    case VIRTIO_MMIO_DRIVER_FEATURES:
        if (proxy->legacy) {
            if (proxy->guest_features_sel) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: attempt to write guest features with "
                              "guest_features_sel > 0 in legacy mode\n",
                              __func__);
            } else {
                virtio_set_features(vdev, value);
            }
        } else {
            proxy->guest_features[proxy->guest_features_sel] = value;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        if (value) {
            proxy->guest_features_sel = 1;
        } else {
            proxy->guest_features_sel = 0;
        }
        break;
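    /*
     * Legacy guests declare their page size here; only the lowest set
     * bit matters, and ctz32(0) == 32 makes a zero write fall back to
     * a shift of 0.
     */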
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);

        if (proxy->legacy) {
            virtio_queue_update_rings(vdev, vdev->queue_sel);
        } else {
            proxy->vqs[vdev->queue_sel].num = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
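    /*
     * Writing 1 to QUEUE_READY commits the queue: the 64-bit descriptor,
     * avail and used ring addresses are assembled from the LOW/HIGH
     * halves latched by the QUEUE_*_LOW/HIGH writes below.
     */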
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        atomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
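    /*
     * Ordering matters here: ioeventfd is stopped before a status write
     * that drops DRIVER_OK takes effect, the latched feature banks are
     * applied at FEATURES_OK, ioeventfd starts once DRIVER_OK is set,
     * and a status of 0 triggers a full device reset.
     */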
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
            virtio_set_features(vdev,
                                ((uint64_t)proxy->guest_features[1]) << 32 |
                                proxy->guest_features[0]);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
            virtio_mmio_soft_reset(proxy);
        }
        break;
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[1] = value;
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
    case VIRTIO_MMIO_CONFIG_GENERATION:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to read-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
    }
}
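
/*
 * The register layout is the same for both transports; only the access
 * endianness differs: the legacy transport used the guest's native
 * endianness, while virtio 1.0 mandates little-endian registers.
 */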
static const MemoryRegionOps virtio_legacy_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (atomic_read(&vdev->isr) != 0);
    trace_virtio_mmio_setting_irq(level);
    qemu_set_irq(proxy->irq, level);
}

static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}
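
/*
 * The vmstate below covers transport state that only exists in
 * non-legacy mode (banked guest features, per-queue ring addresses);
 * it is only migrated when virtio_mmio_has_extra_state() returns true.
 */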
static const VMStateDescription vmstate_virtio_mmio_queue_state = {
    .name = "virtio_mmio/queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOMMIOQueue),
        VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio_state_sub = {
    .name = "virtio_mmio/state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_mmio_queue_state,
                             VirtIOMMIOQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio = {
    .name = "virtio_mmio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_virtio_mmio_state_sub,
        NULL
    }
};

static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL);
}

static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1);
}

static bool virtio_mmio_has_extra_state(DeviceState *opaque)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return !proxy->legacy;
}

static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    int i;

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;

    if (!proxy->legacy) {
        proxy->guest_features[0] = proxy->guest_features[1] = 0;

        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].enabled = 0;
            proxy->vqs[i].num = 0;
            proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
            proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
            proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
        }
    }
}

static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->legacy) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* virtio-mmio device */
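
/*
 * force-legacy defaults to true, so boards keep the legacy (virtio
 * 0.9.5) MMIO transport unless they explicitly clear the property to
 * get a virtio 1 device.
 */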
static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS,
                        d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);
    if (proxy->legacy) {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_legacy_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    } else {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    }
    sysbus_init_mmio(sbd, &proxy->iomem);
}

static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, virtio_mmio_properties);
}

static const TypeInfo virtio_mmio_info = {
    .name = TYPE_VIRTIO_MMIO,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init = virtio_mmio_class_init,
};

/* virtio-mmio-bus. */

static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    SysBusDevice *proxy_sbd;
    char *path;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    proxy_sbd = SYS_BUS_DEVICE(virtio_mmio_proxy);
    assert(proxy_sbd->num_mmio == 1);
    assert(proxy_sbd->mmio[0].memory == &virtio_mmio_proxy->iomem);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               proxy_sbd->mmio[0].addr);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               proxy_sbd->mmio[0].addr);
    }
    g_free(proxy_path);
    return path;
}

static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->save_extra_state = virtio_mmio_save_extra_state;
    k->load_extra_state = virtio_mmio_load_extra_state;
    k->has_extra_state = virtio_mmio_has_extra_state;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->pre_plugged = virtio_mmio_pre_plugged;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}

static const TypeInfo virtio_mmio_bus_info = {
    .name = TYPE_VIRTIO_MMIO_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)