/* qemu/ar7.git: hw/virtio/virtio-pci.c */
/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

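/*
 * Interrupt delivery to the guest: use MSI-X when the guest has enabled it,
 * otherwise drive the legacy INTx line from the low bit of the ISR.
 */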
static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, vdev->config_vector);
}

static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

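/*
 * Spacing of per-queue slots in the modern notify region: a full page per
 * queue when the page-per-vq flag is set, otherwise 4 bytes per queue.
 */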
#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}

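/*
 * Wire (or unwire) a queue's host notifier eventfd to the notify regions:
 * the legacy I/O port, the modern MMIO notify area and, when enabled, the
 * modern PIO notify area, so guest kicks are handled via ioeventfd.
 */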
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}

static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

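/* Write handler for the legacy (pre virtio-1.0) I/O port register block. */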
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

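/* Read handler for the legacy (pre virtio-1.0) I/O port register block. */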
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = atomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;
    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

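/*
 * Map an offset into the modern capability layout onto the VirtIOPCIRegion
 * that backs it, adjusting *off to be relative to that region. Used by the
 * VIRTIO_PCI_CAP_PCI_CFG window accessors below.
 */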
static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}

static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

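/*
 * PCI config space write hook: after the default write it forwards FLR
 * bits when enabled, tracks bus-master enable to start/stop the device,
 * and services writes through the VIRTIO_PCI_CAP_PCI_CFG window.
 */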
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}

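/*
 * The kvm_virtio_pci_* helpers below manage KVM MSI routes and irqfds for
 * MSI-X vectors, refcounting the route per vector so several queues can
 * share one vector.
 */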
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

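/*
 * Guest notifier setup: create or tear down the per-queue event notifier
 * and, when MSI-X is not enabled, keep the device's own mask callback in
 * sync with the assignment.
 */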
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}

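/* MMIO accessors for the modern common configuration structure. */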
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}

static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent);
    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = val;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = atomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);

    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIODevice *vdev = opaque;
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };

    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify_pio.size);
}

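/*
 * Map a modern region into its BAR and advertise it to the guest through a
 * vendor-specific PCI capability describing bar/offset/length.
 */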
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuses guests
     */
    if (!proxy->ignore_backend_features &&
            !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}

static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

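/*
 * Common realize for all virtio-pci devices: lay out the BARs and modern
 * regions, pick legacy vs modern defaults based on the bus, and set up the
 * PCI Express capabilities before creating the virtio bus and calling the
 * subclass realize.
 */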
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, 256);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    msix_uninit_exclusive_bar(pci_dev);
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    PCIDevice *dev = PCI_DEVICE(qdev);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}

void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    g_free(base_name);
}

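/*
 * Typical usage, as an illustrative sketch only: the device type and
 * callbacks below ("virtio-foo-pci", VirtIOFooPCI, virtio_foo_pci_*) are
 * hypothetical and not defined in this file; only the
 * VirtioPCIDeviceTypeInfo fields used by virtio_pci_types_register()
 * above are real.
 *
 *     static const VirtioPCIDeviceTypeInfo virtio_foo_pci_info = {
 *         .base_name              = "virtio-foo-pci-base",
 *         .generic_name           = "virtio-foo-pci",
 *         .transitional_name      = "virtio-foo-pci-transitional",
 *         .non_transitional_name  = "virtio-foo-pci-non-transitional",
 *         .instance_size = sizeof(VirtIOFooPCI),
 *         .instance_init = virtio_foo_pci_instance_init,
 *         .class_init    = virtio_foo_pci_class_init,
 *     };
 *
 *     static void virtio_foo_pci_register(void)
 *     {
 *         virtio_pci_types_register(&virtio_foo_pci_info);
 *     }
 *     type_init(virtio_foo_pci_register)
 */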
/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)