/*
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "hw/boards.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"
#include "sysemu/replay.h"
#include "trace.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

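/*
 * Illustrative note, not part of the original file: container_of() maps the
 * embedded pci_dev.qdev DeviceState back to the enclosing proxy, so for any
 * proxy the round trip is an identity:
 *
 *   VirtIOPCIProxy *p = ...;
 *   assert(to_virtio_pci_proxy(DEVICE(&p->pci_dev)) == p);
 */
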
static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        msix_notify(&proxy->pci_dev, vector);
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1);
    }
}

static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, vdev->config_vector);
    }
}

static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
    }
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int ret;

    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}

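/*
 * Illustrative sketch, not part of the original file: the notify address of
 * queue n inside the modern notify region is just the multiplier times the
 * queue index -- the same math virtio_pci_ioeventfd_assign() below uses.
 * With the default multiplier of 4 all queues share one densely packed
 * window; page-per-vq spreads them one page apart.
 */
static inline hwaddr virtio_pci_example_notify_addr(VirtIOPCIProxy *proxy,
                                                    int n)
{
    return virtio_pci_queue_mem_mult(proxy) * (hwaddr)n;
}
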
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}

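/*
 * Note on the eventfd sizes above (added commentary, not in the original
 * file): with KVM "fast mmio" (kvm_ioeventfd_any_length_enabled()) the
 * modern notify eventfd is registered with size 0 so a write of any length
 * matches; otherwise a 2-byte eventfd without datamatch is used.  The legacy
 * and modern-PIO paths always datamatch the 2-byte queue index the guest
 * writes.
 */
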
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: unexpected address 0x%x value 0x%x\n",
                      __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = qatomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */

void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}

static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

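/*
 * Illustrative sketch, not part of the original file: a 4-byte read through
 * the helper above.  The destination buffer must be aligned to the access
 * size, as the asserts demand, so a uint32_t works naturally; the bytes are
 * stored little-endian by pci_set_long(), hence the le32_to_cpu().
 */
G_GNUC_UNUSED static uint32_t
virtio_address_space_read_long_example(VirtIOPCIProxy *proxy, hwaddr addr)
{
    uint32_t buf = 0;

    virtio_address_space_read(proxy, addr, (uint8_t *)&buf, sizeof(buf));
    return le32_to_cpu(buf);
}
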
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}

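G_GNUC_UNUSED static void
virtio_pci_cfg_window_example(VirtIOPCIProxy *proxy)
{
    /*
     * Illustrative sketch, not part of the original file: what one guest
     * access through the VIRTIO_PCI_CAP_PCI_CFG window boils down to,
     * assuming virtio_pci_device_plugged() has already mapped the modern
     * regions -- the guest programs cap.bar/offset/length, then touches
     * cap.pci_cfg_data, and the config hooks above forward that to the
     * selected region.  Here: a 4-byte read at the start of the common
     * config region.
     */
    uint8_t data[4] QEMU_ALIGNED(4);

    virtio_address_space_read(proxy, proxy->common.offset, data, sizeof(data));
}
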
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
        ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        kvm_irqchip_commit_route_changes(&c);
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /*
     * When deassigning, pass a consistent nvqs value to avoid leaking
     * notifiers. But first check we've actually been configured, exit
     * early if we haven't.
     */
    if (!assign && !proxy->nvqs_with_notifiers) {
        return 0;
    }
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    hwaddr offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}

static bool virtio_pci_iommu_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;
    AddressSpace *dma_as = pci_device_iommu_address_space(dev);

    if (dma_as == &address_space_memory) {
        return false;
    }

    return true;
}

static bool virtio_pci_queue_enabled(DeviceState *d, int n)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return proxy->vqs[n].enabled;
    }

    return virtio_queue_enabled_legacy(vdev, n);
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}

static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        if (val == 1) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val);
        }
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}

static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    if (virtio_bus_get_device(&proxy->bus) == NULL) {
        return UINT64_MAX;
    }

    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        trace_virtio_pci_notify_write(addr, val, size);
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = val;

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        trace_virtio_pci_notify_write_pio(addr, val, size);
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    val = qatomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);
    return val;
}

static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = 0;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
                                           const char *vdev_name)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    g_autoptr(GString) name = g_string_new(NULL);

    g_string_printf(name, "virtio-pci-common-%s", vdev_name);
    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          name->str,
                          proxy->common.size);

    g_string_printf(name, "virtio-pci-isr-%s", vdev_name);
    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          name->str,
                          proxy->isr.size);

    g_string_printf(name, "virtio-pci-device-%s", vdev_name);
    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          proxy,
                          name->str,
                          proxy->device.size);

    g_string_printf(name, "virtio-pci-notify-%s", vdev_name);
    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          proxy,
                          name->str,
                          proxy->notify.size);

    g_string_printf(name, "virtio-pci-notify-pio-%s", vdev_name);
    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          proxy,
                          name->str,
                          proxy->notify_pio.size);
}

static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}

static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

*d
, Error
**errp
)
1614 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(d
);
1615 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1617 if (virtio_pci_modern(proxy
)) {
1618 virtio_add_feature(&vdev
->host_features
, VIRTIO_F_VERSION_1
);
1621 virtio_add_feature(&vdev
->host_features
, VIRTIO_F_BAD_FEATURE
);
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuses guests
     */
    if (!proxy->ignore_backend_features &&
            !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (!virtio_legacy_allowed(vdev)) {
            /*
             * To avoid migration issues, we allow legacy mode when legacy
             * check is disabled in the old machine types (< 5.1).
             */
            if (virtio_legacy_check_disabled(vdev)) {
                warn_report("device is modern-only, but for backward "
                            "compatibility legacy is allowed");
            } else {
                error_setg(errp,
                           "device is modern-only, use disable-legacy=on");
                return;
            }
        }
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;


    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy, vdev->name);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}

*d
)
1781 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(d
);
1782 bool modern
= virtio_pci_modern(proxy
);
1783 bool modern_pio
= proxy
->flags
& VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY
;
1785 virtio_pci_stop_ioeventfd(proxy
);
1788 virtio_pci_modern_mem_region_unmap(proxy
, &proxy
->common
);
1789 virtio_pci_modern_mem_region_unmap(proxy
, &proxy
->isr
);
1790 virtio_pci_modern_mem_region_unmap(proxy
, &proxy
->device
);
1791 virtio_pci_modern_mem_region_unmap(proxy
, &proxy
->notify
);
1793 virtio_pci_modern_io_region_unmap(proxy
, &proxy
->notify_pio
);
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 2   --  virtio modern io bar (off by default)
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;
        uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_AER) {
            pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset,
                          PCI_ERR_SIZEOF, NULL);
            last_pcie_cap_offset += PCI_ERR_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, last_pcie_cap_offset,
                          proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED);
            last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    msix_uninit_exclusive_bar(pci_dev);
    if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port &&
        pci_is_express(pci_dev)) {
        pcie_aer_exit(pci_dev);
    }
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    int i;

    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }
}

static void virtio_pci_bus_reset(DeviceState *qdev)
{
    PCIDevice *dev = PCI_DEVICE(qdev);

    virtio_pci_reset(qdev);

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, true),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_AER_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_bus_reset;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}

void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    g_free(base_name);
}

)
2168 * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted
2169 * virtqueue buffers can handle their completion. When a different vCPU
2170 * handles completion it may need to IPI the vCPU that submitted the
2171 * request and this adds overhead.
2173 * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in
2174 * guests with very many vCPUs and a device that is only used by a few
2175 * vCPUs. Unfortunately optimizing that case requires manual pinning inside
2176 * the guest, so those users might as well manually set the number of
2177 * queues. There is no upper limit that can be applied automatically and
2178 * doing so arbitrarily would result in a sudden performance drop once the
2179 * threshold number of vCPUs is exceeded.
2181 unsigned num_queues
= current_machine
->smp
.cpus
;
2184 * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but the
2185 * config change interrupt and the fixed virtqueues must be taken into
2188 num_queues
= MIN(num_queues
, PCI_MSIX_FLAGS_QSIZE
- fixed_queues
);
2191 * There is a limit to how many virtqueues a device can have.
2193 return MIN(num_queues
, VIRTIO_QUEUE_MAX
- fixed_queues
);
/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->iommu_enabled = virtio_pci_iommu_enabled;
    k->queue_enabled = virtio_pci_queue_enabled;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_size    = sizeof(VirtioPCIBusClass),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)