virtio: introduce virtqueue_alloc_element
[qemu.git] / hw / virtio / virtio-pci.c
/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "standard-headers/linux/virtio_pci.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-blk.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/virtio-serial.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/virtio-balloon.h"
#include "hw/virtio/virtio-input.h"
#include "hw/pci/pci.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "sysemu/block-backend.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

#define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, vdev->isr & 1);
    }
}
static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, vdev->config_vector);
}

static void virtio_pci_load_modern_queue_state(VirtIOPCIQueue *vq,
                                               QEMUFile *f)
{
    vq->num = qemu_get_be16(f);
    vq->enabled = qemu_get_be16(f);
    vq->desc[0] = qemu_get_be32(f);
    vq->desc[1] = qemu_get_be32(f);
    vq->avail[0] = qemu_get_be32(f);
    vq->avail[1] = qemu_get_be32(f);
    vq->used[0] = qemu_get_be32(f);
    vq->used[1] = qemu_get_be32(f);
}

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}
static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIOPCIProxy *proxy = pv;
    int i;

    proxy->dfselect = qemu_get_be32(f);
    proxy->gfselect = qemu_get_be32(f);
    proxy->guest_features[0] = qemu_get_be32(f);
    proxy->guest_features[1] = qemu_get_be32(f);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        virtio_pci_load_modern_queue_state(&proxy->vqs[i], f);
    }

    return 0;
}

static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq,
                                               QEMUFile *f)
{
    qemu_put_be16(f, vq->num);
    qemu_put_be16(f, vq->enabled);
    qemu_put_be32(f, vq->desc[0]);
    qemu_put_be32(f, vq->desc[1]);
    qemu_put_be32(f, vq->avail[0]);
    qemu_put_be32(f, vq->avail[1]);
    qemu_put_be32(f, vq->used[0]);
    qemu_put_be32(f, vq->used[1]);
}

static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIOPCIProxy *proxy = pv;
    int i;

    qemu_put_be32(f, proxy->dfselect);
    qemu_put_be32(f, proxy->gfselect);
    qemu_put_be32(f, proxy->guest_features[0]);
    qemu_put_be32(f, proxy->guest_features[1]);
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        virtio_pci_save_modern_queue_state(&proxy->vqs[i], f);
    }
}

static const VMStateInfo vmstate_info_virtio_pci_modern_state = {
    .name = "virtqueue_state",
    .get = get_virtio_pci_modern_state,
    .put = put_virtio_pci_modern_state,
};
static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
}

static const VMStateDescription vmstate_virtio_pci_modern_state = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        {
            .name = "modern_state",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0,
            .info = &vmstate_info_virtio_pci_modern_state,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state,
        NULL
    }
};

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}
static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                                 int n, bool assign, bool set_handler)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int n, r;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    while (--n >= 0) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed. Fallback to a userspace (slower).", __func__);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int r;
    int n;

    if (!proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
}
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_stop_ioeventfd(proxy);
            virtio_reset(vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;
    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(AddressSpace *as, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint32_t val;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    case 2:
        val = pci_get_word(buf);
        address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    case 4:
        val = pci_get_long(buf);
        address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

static void
virtio_address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    uint32_t val;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_byte(buf, val);
        break;
    case 2:
        val = address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_word(buf, val);
        break;
    case 4:
        val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(&proxy->modern_as, off,
                                       cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(&proxy->modern_as, off,
                                      cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, msg, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    int ret;
    ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
    return ret;
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}
static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;
    MSIMessage msg;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        msg = msix_get_message(dev, vector);
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}
static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}
static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}
static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers.  This makes it easy to avoid stepping on each others' toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_pci_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc. */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}
#ifdef CONFIG_VIRTFS
static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_9p_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);

    k->realize = virtio_9p_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = 0x2;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_9p_pci_properties;
}

static void virtio_9p_pci_instance_init(Object *obj)
{
    V9fsPCIState *dev = VIRTIO_9P_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_9P);
}

static const TypeInfo virtio_9p_pci_info = {
    .name = TYPE_VIRTIO_9P_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(V9fsPCIState),
    .instance_init = virtio_9p_pci_instance_init,
    .class_init = virtio_9p_pci_class_init,
};
#endif /* CONFIG_VIRTFS */

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}
static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, cap->cap_len);
    assert(offset > 0);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            val = (vdev->host_features & ~VIRTIO_LEGACY_FEATURES) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}
static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        /* TODO: need a way to put num back on reset. */
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}
static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = addr / QEMU_VIRTIO_PCI_QUEUE_MEM_MULT;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = val;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = vdev->isr;

    vdev->isr = 0;
    pci_irq_deassert(&proxy->pci_dev);

    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIODevice *vdev = opaque;
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };

    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify.size);
}
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        /* legacy and transitional */
        pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                     pci_get_word(config + PCI_VENDOR_ID));
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(QEMU_VIRTIO_PCI_QUEUE_MEM_MULT),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it. */
            if (err != -ENOTSUP) {
                error_report("unable to init msix vectors to %" PRIu32,
                             proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}
static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar = 0;
    proxy->msix_bar = 1;
    proxy->modern_io_bar = 2;
    proxy->modern_mem_bar = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size =
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
                       VIRTIO_QUEUE_MAX);

    memory_region_init_alias(&proxy->modern_cfg,
                             OBJECT(proxy),
                             "virtio-pci-cfg",
                             &proxy->modern_bar,
                             0,
                             memory_region_size(&proxy->modern_bar));

    address_space_init(&proxy->modern_as, &proxy->modern_cfg, "virtio-pci-cfg-as");

    if (pci_is_express(pci_dev) && pci_bus_is_express(pci_dev->bus) &&
        !pci_bus_is_root(pci_dev->bus)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF);
        assert(pos > 0);

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}
static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);

    msix_uninit_exclusive_bar(pci_dev);
    address_space_destroy(&proxy->modern_as);
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("disable-legacy", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
    DEFINE_PROP_BIT("disable-modern", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT, true),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    dc->props = virtio_pci_properties;
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    vpciklass->parent_dc_realize = dc->realize;
    dc->realize = virtio_pci_dc_realize;
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name = TYPE_VIRTIO_PCI,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init = virtio_pci_class_init,
    .class_size = sizeof(VirtioPCIClass),
    .abstract = true,
};
/* virtio-blk-pci */

static Property virtio_blk_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_blk_pci_properties;
    k->realize = virtio_blk_pci_realize;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_blk_pci_instance_init(Object *obj)
{
    VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BLK);
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev),"iothread",
                              &error_abort);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo virtio_blk_pci_info = {
    .name = TYPE_VIRTIO_BLK_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBlkPCI),
    .instance_init = virtio_blk_pci_instance_init,
    .class_init = virtio_blk_pci_class_init,
};
/* virtio-scsi-pci */

static Property virtio_scsi_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    /*
     * For command line compatibility, this sets the virtio-scsi-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_scsi_pci_instance_init(Object *obj)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SCSI);
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
                              &error_abort);
}

static const TypeInfo virtio_scsi_pci_info = {
    .name = TYPE_VIRTIO_SCSI_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSCSIPCI),
    .instance_init = virtio_scsi_pci_instance_init,
    .class_init = virtio_scsi_pci_class_init,
};
/* vhost-scsi-pci */

#ifdef CONFIG_VHOST_SCSI
static Property vhost_scsi_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};

static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = vhost_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = vhost_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void vhost_scsi_pci_instance_init(Object *obj)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_SCSI);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo vhost_scsi_pci_info = {
    .name          = TYPE_VHOST_SCSI_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VHostSCSIPCI),
    .instance_init = vhost_scsi_pci_instance_init,
    .class_init    = vhost_scsi_pci_class_init,
};
#endif

/* virtio-balloon-pci */

static Property virtio_balloon_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

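/*
 * The "class" property exists for compatibility: per the inline comment in
 * the realize callback below, machine types from before QEMU 1.1 exposed
 * the balloon as PCI_CLASS_MEMORY_RAM.  Any value other than that or
 * PCI_CLASS_OTHERS is forced back to PCI_CLASS_OTHERS.
 */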
static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
        vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        vpci_dev->class_code = PCI_CLASS_OTHERS;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = virtio_balloon_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_balloon_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_balloon_pci_instance_init(Object *obj)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BALLOON);
    object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
                              "guest-stats", &error_abort);
    object_property_add_alias(obj, "guest-stats-polling-interval",
                              OBJECT(&dev->vdev),
                              "guest-stats-polling-interval", &error_abort);
}

static const TypeInfo virtio_balloon_pci_info = {
    .name          = TYPE_VIRTIO_BALLOON_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBalloonPCI),
    .instance_init = virtio_balloon_pci_instance_init,
    .class_init    = virtio_balloon_pci_class_init,
};

/* virtio-serial-pci */

static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
        vpci_dev->class_code != PCI_CLASS_OTHERS) {        /* qemu-kvm  */
        vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
    }

    /* backwards-compatibility with machines that were created with
       DEV_NVECTORS_UNSPECIFIED */
    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
    }

    /*
     * For command line compatibility, this sets the virtio-serial-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_serial_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = virtio_serial_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
    dc->props = virtio_serial_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
}

static void virtio_serial_pci_instance_init(Object *obj)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SERIAL);
}

static const TypeInfo virtio_serial_pci_info = {
    .name          = TYPE_VIRTIO_SERIAL_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSerialPCI),
    .instance_init = virtio_serial_pci_instance_init,
    .class_init    = virtio_serial_pci_class_init,
};

/* virtio-net-pci */

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_PROP_END_OF_LIST(),
};

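/*
 * Note that ioeventfd defaults to off for virtio-net-pci, unlike the
 * storage and serial devices above, and the default of three MSI-X vectors
 * presumably covers one rx queue, one tx queue and the config interrupt.
 */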
static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    DeviceState *qdev = DEVICE(vpci_dev);
    VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    virtio_net_set_netclient_name(&dev->vdev, qdev->id,
                                  object_get_typename(OBJECT(qdev)));
    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    k->romfile = "efi-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->props = virtio_net_properties;
    vpciklass->realize = virtio_net_pci_realize;
}

static void virtio_net_pci_instance_init(Object *obj)
{
    VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_NET);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo virtio_net_pci_info = {
    .name          = TYPE_VIRTIO_NET_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIONetPCI),
    .instance_init = virtio_net_pci_instance_init,
    .class_init    = virtio_net_pci_class_init,
};

/* virtio-rng-pci */

static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vrng->vdev);
    Error *err = NULL;

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    object_property_set_link(OBJECT(vrng),
                             OBJECT(vrng->vdev.conf.rng), "rng",
                             NULL);
}

static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_rng_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_rng_initfn(Object *obj)
{
    VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_RNG);
    object_property_add_alias(obj, "rng", OBJECT(&dev->vdev), "rng",
                              &error_abort);
}

static const TypeInfo virtio_rng_pci_info = {
    .name          = TYPE_VIRTIO_RNG_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIORngPCI),
    .instance_init = virtio_rng_initfn,
    .class_init    = virtio_rng_pci_class_init,
};

/* virtio-input-pci */

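/*
 * Type layout for the input devices: TYPE_VIRTIO_INPUT_PCI is an abstract
 * base, TYPE_VIRTIO_INPUT_HID_PCI (also abstract) derives from it for the
 * emulated keyboard, mouse and tablet, and TYPE_VIRTIO_INPUT_HOST_PCI
 * (Linux only) derives directly from the base for host device
 * pass-through.
 */
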
static Property virtio_input_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vinput->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    /* force virtio-1.0 */
    vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN;
    vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY;
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    dc->props = virtio_input_pci_properties;
    k->realize = virtio_input_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);

    pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
}

static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
}

static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
                                                  void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
}

static void virtio_keyboard_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_KEYBOARD);
}

static void virtio_mouse_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_MOUSE);
}

static void virtio_tablet_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_TABLET);
}

static const TypeInfo virtio_input_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_PCI,
    .parent        = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOInputPCI),
    .class_init    = virtio_input_pci_class_init,
    .abstract      = true,
};

static const TypeInfo virtio_input_hid_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_HID_PCI,
    .parent        = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .abstract      = true,
};

static const TypeInfo virtio_keyboard_pci_info = {
    .name          = TYPE_VIRTIO_KEYBOARD_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init    = virtio_input_hid_kbd_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_keyboard_initfn,
};

static const TypeInfo virtio_mouse_pci_info = {
    .name          = TYPE_VIRTIO_MOUSE_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init    = virtio_input_hid_mouse_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_mouse_initfn,
};

static const TypeInfo virtio_tablet_pci_info = {
    .name          = TYPE_VIRTIO_TABLET_PCI,
    .parent        = TYPE_VIRTIO_INPUT_HID_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_tablet_initfn,
};

#ifdef CONFIG_LINUX
static void virtio_host_initfn(Object *obj)
{
    VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_INPUT_HOST);
}

static const TypeInfo virtio_host_pci_info = {
    .name          = TYPE_VIRTIO_INPUT_HOST_PCI,
    .parent        = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHostPCI),
    .instance_init = virtio_host_initfn,
};
#endif

/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}

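/*
 * virtio_pci_bus_class_init() wires every VirtioBusClass callback to the
 * PCI transport implementations defined earlier in this file; this is how
 * the generic virtio core reaches the PCI-specific notification, config
 * save/load and notifier handling.
 */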
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_host_notifier = virtio_pci_set_host_notifier;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_rng_pci_info);
    type_register_static(&virtio_input_pci_info);
    type_register_static(&virtio_input_hid_pci_info);
    type_register_static(&virtio_keyboard_pci_info);
    type_register_static(&virtio_mouse_pci_info);
    type_register_static(&virtio_tablet_pci_info);
#ifdef CONFIG_LINUX
    type_register_static(&virtio_host_pci_info);
#endif
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
#ifdef CONFIG_VIRTFS
    type_register_static(&virtio_9p_pci_info);
#endif
    type_register_static(&virtio_blk_pci_info);
    type_register_static(&virtio_scsi_pci_info);
    type_register_static(&virtio_balloon_pci_info);
    type_register_static(&virtio_serial_pci_info);
    type_register_static(&virtio_net_pci_info);
#ifdef CONFIG_VHOST_SCSI
    type_register_static(&vhost_scsi_pci_info);
#endif
}

type_init(virtio_pci_register_types)