hw/virtio/virtio-pci.c
/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "standard-headers/linux/virtio_pci.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-blk.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/virtio-serial.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/virtio-balloon.h"
#include "hw/virtio/virtio-input.h"
#include "hw/pci/pci.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "sysemu/block-backend.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

#define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        msix_notify(&proxy->pci_dev, vector);
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, vdev->isr & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, vdev->config_vector);
    }
}

static void virtio_pci_load_modern_queue_state(VirtIOPCIQueue *vq,
                                               QEMUFile *f)
{
    vq->num = qemu_get_be16(f);
    vq->enabled = qemu_get_be16(f);
    vq->desc[0] = qemu_get_be32(f);
    vq->desc[1] = qemu_get_be32(f);
    vq->avail[0] = qemu_get_be32(f);
    vq->avail[1] = qemu_get_be32(f);
    vq->used[0] = qemu_get_be32(f);
    vq->used[1] = qemu_get_be32(f);
}

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIOPCIProxy *proxy = pv;
    int i;

    proxy->dfselect = qemu_get_be32(f);
    proxy->gfselect = qemu_get_be32(f);
    proxy->guest_features[0] = qemu_get_be32(f);
    proxy->guest_features[1] = qemu_get_be32(f);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        virtio_pci_load_modern_queue_state(&proxy->vqs[i], f);
    }

    return 0;
}

static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq,
                                               QEMUFile *f)
{
    qemu_put_be16(f, vq->num);
    qemu_put_be16(f, vq->enabled);
    qemu_put_be32(f, vq->desc[0]);
    qemu_put_be32(f, vq->desc[1]);
    qemu_put_be32(f, vq->avail[0]);
    qemu_put_be32(f, vq->avail[1]);
    qemu_put_be32(f, vq->used[0]);
    qemu_put_be32(f, vq->used[1]);
}

static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIOPCIProxy *proxy = pv;
    int i;

    qemu_put_be32(f, proxy->dfselect);
    qemu_put_be32(f, proxy->gfselect);
    qemu_put_be32(f, proxy->guest_features[0]);
    qemu_put_be32(f, proxy->guest_features[1]);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        virtio_pci_save_modern_queue_state(&proxy->vqs[i], f);
    }
}

static const VMStateInfo vmstate_info_virtio_pci_modern_state = {
    .name = "virtqueue_state",
    .get = get_virtio_pci_modern_state,
    .put = put_virtio_pci_modern_state,
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
}

static const VMStateDescription vmstate_virtio_pci_modern_state = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "modern_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_virtio_pci_modern_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state,
        NULL
    }
};

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
    }
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int ret;

    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

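/* Wire (or unwire) the host notifier (ioeventfd) for virtqueue n into
 * whichever notify regions are in use: the legacy I/O BAR, the modern
 * MMIO notify region, and/or the modern PIO notify region. */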
static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                                 int n, bool assign,
                                                 bool set_handler)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int n, r;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    while (--n >= 0) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed. Falling back to userspace (slower).", __func__);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int r;
    int n;

    if (!proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
}

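/* Legacy (virtio 0.9) I/O port register writes. */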
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

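/* Legacy (virtio 0.9) I/O port register reads. */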
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(AddressSpace *as, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint32_t val;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    case 2:
        val = pci_get_word(buf);
        address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    case 4:
        val = pci_get_long(buf);
        address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

static void
virtio_address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    uint32_t val;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_byte(buf, val);
        break;
    case 2:
        val = address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_word(buf, val);
        break;
    case 4:
        val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(&proxy->modern_as, off,
                                       cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(&proxy->modern_as, off,
                                      cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}

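/* Reference-count the KVM MSI route for an MSI-X vector: the route is
 * allocated on first use and released again when the last user drops it. */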
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, msg, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    int ret;

    ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
    return ret;
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;
    MSIMessage msg;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        msg = msix_get_message(dev, vector);
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

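/* Poll queues whose vector lies in [vector_start, vector_end) and is
 * currently masked, and latch any pending guest notification into the
 * MSI-X pending bits so nothing is lost while the vector is masked. */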
static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

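/* Assign or release guest notifiers (and, when KVM irqfds are in use,
 * the per-vector MSI route bookkeeping) for the first nvqs virtqueues. */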
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers.  This makes it easy to avoid stepping on each others' toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_pci_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc. */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

#ifdef CONFIG_VIRTFS
static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_9p_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);

    k->realize = virtio_9p_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = 0x2;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_9p_pci_properties;
}

static void virtio_9p_pci_instance_init(Object *obj)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_9P);
}

static const TypeInfo virtio_9p_pci_info = {
    .name          = TYPE_VIRTIO_9P_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(V9fsPCIState),
    .instance_init = virtio_9p_pci_instance_init,
    .class_init    = virtio_9p_pci_class_init,
};
#endif /* CONFIG_VIRTFS */

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, cap->cap_len);
    assert(offset > 0);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}

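/* Accessors for the modern (virtio 1.0) common configuration structure. */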
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            val = (vdev->host_features & ~VIRTIO_LEGACY_FEATURES) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        /* TODO: need a way to put num back on reset. */
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}

static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = addr / QEMU_VIRTIO_PCI_QUEUE_MEM_MULT;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = val;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = vdev->isr;

    vdev->isr = 0;
    pci_irq_deassert(&proxy->pci_dev);

    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIODevice *vdev = opaque;
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

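/* MemoryRegionOps and region setup for the modern (virtio 1.0) regions:
 * common config, ISR, device config and the MMIO/PIO notify areas. */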
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };

    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify.size);
}

static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        /* legacy and transitional */
        pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                     pci_get_word(config + PCI_VENDOR_ID));
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(QEMU_VIRTIO_PCI_QUEUE_MEM_MULT),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it. */
            if (err != -ENOTSUP) {
                error_report("unable to init msix vectors to %" PRIu32,
                             proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 4+5 --  virtio modern memory (64bit) bar
     */
    proxy->legacy_io_bar  = 0;
    proxy->msix_bar       = 1;
    proxy->modern_io_bar  = 2;
    proxy->modern_mem_bar = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size =
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
                       VIRTIO_QUEUE_MAX);

    memory_region_init_alias(&proxy->modern_cfg,
                             OBJECT(proxy),
                             "virtio-pci-cfg",
                             &proxy->modern_bar,
                             0,
                             memory_region_size(&proxy->modern_bar));

    address_space_init(&proxy->modern_as, &proxy->modern_cfg,
                       "virtio-pci-cfg-as");

    if (pci_is_express(pci_dev) && pci_bus_is_express(pci_dev->bus) &&
        !pci_bus_is_root(pci_dev->bus)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF);
        assert(pos > 0);

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);

    msix_uninit_exclusive_bar(pci_dev);
    address_space_destroy(&proxy->modern_as);
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy,
                    flags, VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("disable-legacy", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
    DEFINE_PROP_BIT("disable-modern", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT, true),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    dc->props = virtio_pci_properties;
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    vpciklass->parent_dc_realize = dc->realize;
    dc->realize = virtio_pci_dc_realize;
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

/* virtio-blk-pci */

static Property virtio_blk_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_blk_pci_properties;
    k->realize = virtio_blk_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_blk_pci_instance_init(Object *obj)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BLK);
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
                              &error_abort);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo virtio_blk_pci_info = {
    .name          = TYPE_VIRTIO_BLK_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBlkPCI),
    .instance_init = virtio_blk_pci_instance_init,
    .class_init    = virtio_blk_pci_class_init,
};

/* virtio-scsi-pci */

static Property virtio_scsi_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    /*
     * For command line compatibility, this sets the virtio-scsi-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_scsi_pci_instance_init(Object *obj)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SCSI);
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
                              &error_abort);
}

static const TypeInfo virtio_scsi_pci_info = {
    .name          = TYPE_VIRTIO_SCSI_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSCSIPCI),
    .instance_init = virtio_scsi_pci_instance_init,
    .class_init    = virtio_scsi_pci_class_init,
};


/* vhost-scsi-pci */

#ifdef CONFIG_VHOST_SCSI
static Property vhost_scsi_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};
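
/* vhost-scsi sizes its MSI-X vectors the same way as virtio-scsi above. */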
static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = vhost_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = vhost_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void vhost_scsi_pci_instance_init(Object *obj)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_SCSI);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo vhost_scsi_pci_info = {
    .name = TYPE_VHOST_SCSI_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VHostSCSIPCI),
    .instance_init = vhost_scsi_pci_instance_init,
    .class_init = vhost_scsi_pci_class_init,
};
#endif

/* virtio-balloon-pci */

static Property virtio_balloon_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};
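
/*
 * qemu versions before 1.1 exposed the balloon as PCI_CLASS_MEMORY_RAM;
 * that value is still accepted, while any other user-supplied class code
 * is coerced back to PCI_CLASS_OTHERS before the device is realized.
 */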
static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
        vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        vpci_dev->class_code = PCI_CLASS_OTHERS;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = virtio_balloon_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_balloon_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_balloon_pci_instance_init(Object *obj)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BALLOON);
    object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
                              "guest-stats", &error_abort);
    object_property_add_alias(obj, "guest-stats-polling-interval",
                              OBJECT(&dev->vdev),
                              "guest-stats-polling-interval", &error_abort);
}

static const TypeInfo virtio_balloon_pci_info = {
    .name = TYPE_VIRTIO_BALLOON_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBalloonPCI),
    .instance_init = virtio_balloon_pci_instance_init,
    .class_init = virtio_balloon_pci_class_init,
};

/* virtio-serial-pci */
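
/*
 * Realize carries several pieces of backwards compatibility: the legacy
 * class codes used by qemu 0.10 and qemu-kvm are accepted, the vector
 * count is derived from max_virtserial_ports when left unspecified, and
 * the child bus keeps its historical "<id>.0" name.
 */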
static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
        vpci_dev->class_code != PCI_CLASS_OTHERS) { /* qemu-kvm */
        vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
    }

    /* backwards-compatibility with machines that were created with
       DEV_NVECTORS_UNSPECIFIED */
    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
    }

    /*
     * For command line compatibility, this sets the virtio-serial-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_serial_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = virtio_serial_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
    dc->props = virtio_serial_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
}

static void virtio_serial_pci_instance_init(Object *obj)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SERIAL);
}

static const TypeInfo virtio_serial_pci_info = {
    .name = TYPE_VIRTIO_SERIAL_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSerialPCI),
    .instance_init = virtio_serial_pci_instance_init,
    .class_init = virtio_serial_pci_class_init,
};

/* virtio-net-pci */

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_PROP_END_OF_LIST(),
};
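
/*
 * The net client is named after the device's id and QOM type before the
 * virtio-net device is parented on the proxy's bus and realized.
 */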
static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    DeviceState *qdev = DEVICE(vpci_dev);
    VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    virtio_net_set_netclient_name(&dev->vdev, qdev->id,
                                  object_get_typename(OBJECT(qdev)));
    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    k->romfile = "efi-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->props = virtio_net_properties;
    vpciklass->realize = virtio_net_pci_realize;
}

static void virtio_net_pci_instance_init(Object *obj)
{
    VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_NET);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo virtio_net_pci_info = {
    .name = TYPE_VIRTIO_NET_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIONetPCI),
    .instance_init = virtio_net_pci_instance_init,
    .class_init = virtio_net_pci_class_init,
};

/* virtio-rng-pci */
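
/*
 * Realize the virtio-rng device first; only if that succeeds is the "rng"
 * backend link mirrored onto the proxy object.
 */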
static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vrng->vdev);
    Error *err = NULL;

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    object_property_set_link(OBJECT(vrng),
                             OBJECT(vrng->vdev.conf.rng), "rng",
                             NULL);
}

static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_rng_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_rng_initfn(Object *obj)
{
    VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_RNG);
    object_property_add_alias(obj, "rng", OBJECT(&dev->vdev), "rng",
                              &error_abort);
}

static const TypeInfo virtio_rng_pci_info = {
    .name = TYPE_VIRTIO_RNG_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIORngPCI),
    .instance_init = virtio_rng_initfn,
    .class_init = virtio_rng_pci_class_init,
};

/* virtio-input-pci */

static Property virtio_input_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};
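
/*
 * virtio-input only exists as a virtio 1.0 device, so the proxy is forced
 * into modern-only mode (legacy disabled) before realizing it.
 */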
static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vinput->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    /* force virtio-1.0 */
    vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN;
    vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY;
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    dc->props = virtio_input_pci_properties;
    k->realize = virtio_input_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);

    pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
}

static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
}

static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
                                                  void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
}

static void virtio_keyboard_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_KEYBOARD);
}

static void virtio_mouse_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_MOUSE);
}

static void virtio_tablet_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_TABLET);
}

static const TypeInfo virtio_input_pci_info = {
    .name = TYPE_VIRTIO_INPUT_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOInputPCI),
    .class_init = virtio_input_pci_class_init,
    .abstract = true,
};

static const TypeInfo virtio_input_hid_pci_info = {
    .name = TYPE_VIRTIO_INPUT_HID_PCI,
    .parent = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .abstract = true,
};

static const TypeInfo virtio_keyboard_pci_info = {
    .name = TYPE_VIRTIO_KEYBOARD_PCI,
    .parent = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init = virtio_input_hid_kbd_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_keyboard_initfn,
};

static const TypeInfo virtio_mouse_pci_info = {
    .name = TYPE_VIRTIO_MOUSE_PCI,
    .parent = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init = virtio_input_hid_mouse_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_mouse_initfn,
};

static const TypeInfo virtio_tablet_pci_info = {
    .name = TYPE_VIRTIO_TABLET_PCI,
    .parent = TYPE_VIRTIO_INPUT_HID_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_tablet_initfn,
};

#ifdef CONFIG_LINUX
static void virtio_host_initfn(Object *obj)
{
    VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_INPUT_HOST);
}

static const TypeInfo virtio_host_pci_info = {
    .name = TYPE_VIRTIO_INPUT_HOST_PCI,
    .parent = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHostPCI),
    .instance_init = virtio_host_initfn,
};
#endif

/* virtio-pci-bus */
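
/*
 * Each proxy embeds its own virtio bus; it is created in place on the
 * proxy and named "virtio-bus" so the virtio device can be plugged into it.
 */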
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}
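
/*
 * The bus class wires the VirtioBusClass callbacks up to the PCI transport
 * implementations and allows at most one device per bus.
 */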
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_host_notifier = virtio_pci_set_host_notifier;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
}

static const TypeInfo virtio_pci_bus_info = {
    .name = TYPE_VIRTIO_PCI_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init = virtio_pci_bus_class_init,
};
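
/*
 * Register the proxy types defined above (including the abstract bases and
 * the virtio-pci bus itself) with the QOM type registry at module load.
 */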
static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_rng_pci_info);
    type_register_static(&virtio_input_pci_info);
    type_register_static(&virtio_input_hid_pci_info);
    type_register_static(&virtio_keyboard_pci_info);
    type_register_static(&virtio_mouse_pci_info);
    type_register_static(&virtio_tablet_pci_info);
#ifdef CONFIG_LINUX
    type_register_static(&virtio_host_pci_info);
#endif
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
#ifdef CONFIG_VIRTFS
    type_register_static(&virtio_9p_pci_info);
#endif
    type_register_static(&virtio_blk_pci_info);
    type_register_static(&virtio_scsi_pci_info);
    type_register_static(&virtio_balloon_pci_info);
    type_register_static(&virtio_serial_pci_info);
    type_register_static(&virtio_net_pci_info);
#ifdef CONFIG_VHOST_SCSI
    type_register_static(&vhost_scsi_pci_info);
#endif
}

type_init(virtio_pci_register_types)