/*
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/boards.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"
#include "sysemu/replay.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
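/*
 * Layout note (descriptive, added for clarity): in legacy mode the I/O BAR
 * starts with the common virtio header, optionally followed by the MSI-X
 * vector registers when MSI-X is present, and only then by the per-driver
 * device config.  That is why VIRTIO_PCI_REGION_SIZE() keys off
 * msix_present() while VIRTIO_PCI_CONFIG_SIZE() keys off msix_enabled():
 * the guest-visible config offset moves with the MSI-X state.
 */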
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}
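/*
 * Interrupt delivery (descriptive note): when MSI-X is enabled the proxy
 * raises the per-queue or config vector directly; otherwise it falls back to
 * legacy INTx and mirrors bit 0 of the device ISR into the PCI interrupt pin.
 */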
static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        if (vector != VIRTIO_NO_VECTOR) {
            msix_notify(&proxy->pci_dev, vector);
        }
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1);
    }
}
static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, vdev->config_vector);
}
static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};
static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}
static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};
static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};
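/*
 * Descriptive note: the "extra" virtio-pci state above (modern feature select
 * registers, guest feature words and per-queue ring addresses) is only
 * migrated when VIRTIO_PCI_FLAG_MIGRATE_EXTRA is set; the hooks below wire it
 * into the generic virtio migration stream.
 */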
static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}
static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}
static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}
static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
}
static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;
    int ret;

    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);

        if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) {
            return -EINVAL;
        }
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    vdev->config_vector = vector;
    if (vector != VIRTIO_NO_VECTOR) {
        msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}
static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
        if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) {
            return -EINVAL;
        }
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}
typedef struct VirtIOPCIIDInfo {
    uint16_t vdev_id;
    /* pci device id for the transitional device */
    uint16_t trans_devid;
    uint16_t class_id;
} VirtIOPCIIDInfo;

static const VirtIOPCIIDInfo virtio_pci_id_info[] = {
    {
        .vdev_id = VIRTIO_ID_CRYPTO,
        .class_id = PCI_CLASS_OTHERS,
    }, {
        .vdev_id = VIRTIO_ID_FS,
        .class_id = PCI_CLASS_STORAGE_OTHER,
    }, {
        .vdev_id = VIRTIO_ID_NET,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_NET,
        .class_id = PCI_CLASS_NETWORK_ETHERNET,
    }, {
        .vdev_id = VIRTIO_ID_BLOCK,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_BLOCK,
        .class_id = PCI_CLASS_STORAGE_SCSI,
    }, {
        .vdev_id = VIRTIO_ID_CONSOLE,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_CONSOLE,
        .class_id = PCI_CLASS_COMMUNICATION_OTHER,
    }, {
        .vdev_id = VIRTIO_ID_SCSI,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_SCSI,
        .class_id = PCI_CLASS_STORAGE_SCSI
    }, {
        .vdev_id = VIRTIO_ID_9P,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_9P,
        .class_id = PCI_BASE_CLASS_NETWORK,
    }, {
        .vdev_id = VIRTIO_ID_BALLOON,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_BALLOON,
        .class_id = PCI_CLASS_OTHERS,
    }, {
        .vdev_id = VIRTIO_ID_RNG,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_RNG,
        .class_id = PCI_CLASS_OTHERS,
    },
};
static const VirtIOPCIIDInfo *virtio_pci_get_id_info(uint16_t vdev_id)
{
    const VirtIOPCIIDInfo *info = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(virtio_pci_id_info); i++) {
        if (virtio_pci_id_info[i].vdev_id == vdev_id) {
            info = &virtio_pci_id_info[i];
            break;
        }
    }

    if (!info) {
        /* The device id is invalid or not added to the id_info yet. */
        error_report("Invalid virtio device(id %u)", vdev_id);
        abort();
    }

    return info;
}
/*
 * Get the Transitional Device ID for the specific device, return
 * zero if the device is non-transitional.
 */
uint16_t virtio_pci_get_trans_devid(uint16_t device_id)
{
    return virtio_pci_get_id_info(device_id)->trans_devid;
}

/*
 * Get the Class ID for the specific device.
 */
uint16_t virtio_pci_get_class_id(uint16_t device_id)
{
    return virtio_pci_get_id_info(device_id)->class_id;
}
static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}
#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}
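/*
 * Descriptive note: the multiplier above is the notify-area stride.  Queue
 * N's doorbell lives at (notify region base + N * multiplier), i.e. a full
 * 4K page per queue when the page-per-vq flag is set, or a packed 4-byte
 * slot per queue otherwise.
 */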
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
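/*
 * Legacy (virtio 0.9) register interface: the handlers below back the legacy
 * I/O BAR.  Writes cover feature negotiation, queue PFN/selection,
 * notification, device status and the MSI-X config/queue vector registers;
 * anything else is logged as a guest error.
 */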
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        if (vdev->config_vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        }
        /* Make it possible for guest to discover an error took place. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        vector = virtio_queue_vector(vdev, vdev->queue_sel);
        if (vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vector);
        }
        /* Make it possible for guest to discover an error took place. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: unexpected address 0x%x value 0x%x\n",
                      __func__, addr, val);
        break;
    }
}
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = qatomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}
static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;

    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}
static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}
/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}
void virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                               uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}
static void virtio_pci_ats_ctrl_trigger(PCIDevice *pci_dev, bool enable)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    vdev->device_iotlb_enabled = enable;

    if (k->toggle_device_iotlb) {
        k->toggle_device_iotlb(vdev);
    }
}
*dev
, uint32_t address
,
733 uint32_t val
, int len
)
736 uint16_t ats_cap
= dev
->exp
.ats_cap
;
738 if (!ats_cap
|| address
< ats_cap
) {
741 off
= address
- ats_cap
;
742 if (off
>= PCI_EXT_CAP_ATS_SIZEOF
) {
746 if (range_covers_byte(off
, len
, PCI_ATS_CTRL
+ 1)) {
747 virtio_pci_ats_ctrl_trigger(dev
, !!(val
& PCI_ATS_CTRL_ENABLE
));
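/*
 * Descriptive note: the VIRTIO_PCI_CAP_PCI_CFG capability gives guests a
 * window into the modern BARs through ordinary PCI config space.  The config
 * write/read hooks below detect accesses to the capability's pci_cfg_data
 * field and forward them to the selected BAR offset via
 * virtio_address_space_write()/virtio_address_space_read().
 */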
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
        pcie_ats_config_write(pci_dev, address, val, len);
    }

    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}
static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
        ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        kvm_irqchip_commit_route_changes(&c);
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}
static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}
static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    EventNotifier *n,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}
static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         EventNotifier *n,
                                         unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}
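/*
 * Descriptive note: queue_no may also be VIRTIO_CONFIG_IRQ_IDX, in which case
 * the helpers below operate on the configuration-change notifier and
 * config_vector instead of a virtqueue's guest notifier and queue vector.
 */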
static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
                                   EventNotifier **n, unsigned int *vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq;

    if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
        *n = virtio_config_get_guest_notifier(vdev);
        *vector = vdev->config_vector;
    } else {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            return -1;
        }
        *vector = virtio_queue_vector(vdev, queue_no);
        vq = virtio_get_queue(vdev, queue_no);
        *n = virtio_queue_get_guest_notifier(vq);
    }
    return 0;
}
static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
{
    unsigned int vector;
    int ret;
    EventNotifier *n;
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return ret;
    }
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return 0;
    }
    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
    if (ret < 0) {
        goto undo;
    }
    /*
     * If guest supports masking, set up irqfd now.
     * Otherwise, delay until unmasked in the frontend.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
        if (ret < 0) {
            kvm_virtio_pci_vq_vector_release(proxy, vector);
            goto undo;
        }
    }

    return 0;

undo:
    vector = virtio_queue_vector(vdev, queue_no);
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return ret;
    }
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
        if (ret < 0) {
            return ret;
        }
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
    return ret;
}
static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs)
{
    int queue_no;
    int ret = 0;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            return -1;
        }
        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
    }
    return ret;
}

static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
{
    return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
}
static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
                                              int queue_no)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    EventNotifier *n;
    int ret;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    PCIDevice *dev = &proxy->pci_dev;

    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return;
    }
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return;
    }
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
    kvm_virtio_pci_vq_vector_release(proxy, vector);
}
static void kvm_virtio_pci_vector_vq_release(VirtIOPCIProxy *proxy, int nvqs)
{
    int queue_no;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        kvm_virtio_pci_vector_release_one(proxy, queue_no);
    }
}

static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
{
    kvm_virtio_pci_vector_release_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
}
static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg,
                                        EventNotifier *n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
    }

    return ret;
}
static void virtio_pci_one_vector_mask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       EventNotifier *n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
}
static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    EventNotifier *n;
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            n = virtio_queue_get_guest_notifier(vq);
            ret = virtio_pci_one_vector_unmask(proxy, index, vector, msg, n);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    /* unmask config intr */
    if (vector == vdev->config_vector) {
        n = virtio_config_get_guest_notifier(vdev);
        ret = virtio_pci_one_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector,
                                           msg, n);
        if (ret < 0) {
            goto undo_config;
        }
    }
    return 0;

undo_config:
    n = virtio_config_get_guest_notifier(vdev);
    virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            n = virtio_queue_get_guest_notifier(vq);
            virtio_pci_one_vector_mask(proxy, index, vector, n);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}
*dev
, unsigned vector
)
1107 VirtIOPCIProxy
*proxy
= container_of(dev
, VirtIOPCIProxy
, pci_dev
);
1108 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1109 VirtQueue
*vq
= virtio_vector_first_queue(vdev
, vector
);
1114 index
= virtio_get_queue_index(vq
);
1115 n
= virtio_queue_get_guest_notifier(vq
);
1116 if (!virtio_queue_get_num(vdev
, index
)) {
1119 if (index
< proxy
->nvqs_with_notifiers
) {
1120 virtio_pci_one_vector_mask(proxy
, index
, vector
, n
);
1122 vq
= virtio_vector_next_queue(vq
);
1125 if (vector
== vdev
->config_vector
) {
1126 n
= virtio_config_get_guest_notifier(vdev
);
1127 virtio_pci_one_vector_mask(proxy
, VIRTIO_CONFIG_IRQ_IDX
, vector
, n
);
1131 static void virtio_pci_vector_poll(PCIDevice
*dev
,
1132 unsigned int vector_start
,
1133 unsigned int vector_end
)
1135 VirtIOPCIProxy
*proxy
= container_of(dev
, VirtIOPCIProxy
, pci_dev
);
1136 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1137 VirtioDeviceClass
*k
= VIRTIO_DEVICE_GET_CLASS(vdev
);
1139 unsigned int vector
;
1140 EventNotifier
*notifier
;
1143 for (queue_no
= 0; queue_no
< proxy
->nvqs_with_notifiers
; queue_no
++) {
1144 ret
= virtio_pci_get_notifier(proxy
, queue_no
, ¬ifier
, &vector
);
1148 if (vector
< vector_start
|| vector
>= vector_end
||
1149 !msix_is_masked(dev
, vector
)) {
1152 if (k
->guest_notifier_pending
) {
1153 if (k
->guest_notifier_pending(vdev
, queue_no
)) {
1154 msix_set_pending(dev
, vector
);
1156 } else if (event_notifier_test_and_clear(notifier
)) {
1157 msix_set_pending(dev
, vector
);
1160 /* poll the config intr */
1161 ret
= virtio_pci_get_notifier(proxy
, VIRTIO_CONFIG_IRQ_IDX
, ¬ifier
,
1166 if (vector
< vector_start
|| vector
>= vector_end
||
1167 !msix_is_masked(dev
, vector
)) {
1170 if (k
->guest_notifier_pending
) {
1171 if (k
->guest_notifier_pending(vdev
, VIRTIO_CONFIG_IRQ_IDX
)) {
1172 msix_set_pending(dev
, vector
);
1174 } else if (event_notifier_test_and_clear(notifier
)) {
1175 msix_set_pending(dev
, vector
);
void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue *vq,
                                              int n, bool assign,
                                              bool with_irqfd)
{
    if (n == VIRTIO_CONFIG_IRQ_IDX) {
        virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, assign, with_irqfd);
    }
}
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = NULL;
    EventNotifier *notifier = NULL;

    if (n == VIRTIO_CONFIG_IRQ_IDX) {
        notifier = virtio_config_get_guest_notifier(vdev);
    } else {
        vq = virtio_get_queue(vdev, n);
        notifier = virtio_queue_get_guest_notifier(vq);
    }

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, true, with_irqfd);
    } else {
        virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, false,
                                                 with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}
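/*
 * Descriptive note on ordering: guest notifiers (eventfds) must be assigned
 * before the MSI-X vector notifiers are installed and irqfds are routed, and
 * the teardown path runs in the opposite order.  The error labels below
 * unwind each stage that had already completed.
 */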
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /*
     * When deassigning, pass a consistent nvqs value to avoid leaking
     * notifiers. But first check we've actually been configured, exit
     * early if we haven't.
     */
    if (!assign && !proxy->nvqs_with_notifiers) {
        return 0;
    }
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd ||
         (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) &&
        !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_vq_release(proxy, nvqs);
            kvm_virtio_pci_vector_config_release(proxy);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    r = virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, assign,
                                      with_irqfd);
    if (r < 0) {
        goto config_assign_error;
    }
    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd ||
         (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) &&
        assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_vq_use(proxy, nvqs);
            if (r < 0) {
                goto config_assign_error;
            }
            r = kvm_virtio_pci_vector_config_use(proxy);
            if (r < 0) {
                goto config_error;
            }
        }

        r = msix_set_vector_notifiers(&proxy->pci_dev, virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        kvm_virtio_pci_vector_vq_release(proxy, nvqs);
    }
config_error:
    if (with_irqfd) {
        kvm_virtio_pci_vector_config_release(proxy);
    }
config_assign_error:
    virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, !assign,
                                  with_irqfd);
assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    g_free(proxy->vector_irqfd);
    proxy->vector_irqfd = NULL;
    return r;
}
static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}
*d
, bool running
)
1358 VirtIOPCIProxy
*proxy
= to_virtio_pci_proxy(d
);
1359 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1362 /* Old QEMU versions did not set bus master enable on status write.
1363 * Detect DRIVER set and enable it.
1365 if ((proxy
->flags
& VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION
) &&
1366 (vdev
->status
& VIRTIO_CONFIG_S_DRIVER
) &&
1367 !(proxy
->pci_dev
.config
[PCI_COMMAND
] & PCI_COMMAND_MASTER
)) {
1368 pci_default_write_config(&proxy
->pci_dev
, PCI_COMMAND
,
1369 proxy
->pci_dev
.config
[PCI_COMMAND
] |
1370 PCI_COMMAND_MASTER
, 1);
1372 virtio_pci_start_ioeventfd(proxy
);
1374 virtio_pci_stop_ioeventfd(proxy
);
/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}
static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}
static bool virtio_pci_iommu_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;
    AddressSpace *dma_as = pci_device_iommu_address_space(dev);

    if (dma_as == &address_space_memory) {
        return false;
    }

    return true;
}
*d
, int n
)
1412 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(d
);
1413 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1415 if (virtio_vdev_has_feature(vdev
, VIRTIO_F_VERSION_1
)) {
1416 return proxy
->vqs
[n
].enabled
;
1419 return virtio_queue_enabled_legacy(vdev
, n
);
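/*
 * Descriptive note: every modern region (common, ISR, device-specific config
 * and notify) is advertised to the guest as a PCI vendor-specific capability
 * (PCI_CAP_ID_VNDR) whose body is a struct virtio_pci_cap naming the BAR,
 * offset and length of that region.
 */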
static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    case VIRTIO_PCI_COMMON_Q_RESET:
        val = proxy->vqs[vdev->queue_sel].reset;
        break;
    default:
        val = 0;
    }

    return val;
}
static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        if (vdev->config_vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        }
        /* Make it possible for guest to discover an error took place. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_init_region_cache(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        vector = virtio_queue_vector(vdev, vdev->queue_sel);
        if (vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vector);
        }
        /* Make it possible for guest to discover an error took place. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        if (val == 1) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
            proxy->vqs[vdev->queue_sel].reset = 0;
            virtio_queue_enable(vdev, vdev->queue_sel);
        } else {
            virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val);
        }
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_RESET:
        if (val == 1) {
            proxy->vqs[vdev->queue_sel].reset = 1;

            virtio_queue_reset(vdev, vdev->queue_sel);

            proxy->vqs[vdev->queue_sel].reset = 0;
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    default:
        break;
    }
}
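/*
 * Descriptive note: for MMIO notifications the queue index is derived from
 * the write offset (addr / notify multiplier); for the optional PIO notify
 * region the queue index is the value written, which is why the two write
 * handlers below differ.
 */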
static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    if (virtio_bus_get_device(&proxy->bus) == NULL) {
        return UINT64_MAX;
    }

    return 0;
}
static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        trace_virtio_pci_notify_write(addr, val, size);
        virtio_queue_notify(vdev, queue);
    }
}
static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = val;

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        trace_virtio_pci_notify_write_pio(addr, val, size);
        virtio_queue_notify(vdev, queue);
    }
}
, hwaddr addr
,
1702 VirtIOPCIProxy
*proxy
= opaque
;
1703 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1710 val
= qatomic_xchg(&vdev
->isr
, 0);
1711 pci_irq_deassert(&proxy
->pci_dev
);
1715 static void virtio_pci_isr_write(void *opaque
, hwaddr addr
,
1716 uint64_t val
, unsigned size
)
static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}
, hwaddr addr
,
1749 uint64_t val
, unsigned size
)
1751 VirtIOPCIProxy
*proxy
= opaque
;
1752 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1760 virtio_config_modern_writeb(vdev
, addr
, val
);
1763 virtio_config_modern_writew(vdev
, addr
, val
);
1766 virtio_config_modern_writel(vdev
, addr
, val
);
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
                                           const char *vdev_name)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    g_autoptr(GString) name = g_string_new(NULL);

    g_string_printf(name, "virtio-pci-common-%s", vdev_name);
    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops, proxy, name->str,
                          proxy->common.size);

    g_string_printf(name, "virtio-pci-isr-%s", vdev_name);
    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops, proxy, name->str,
                          proxy->isr.size);

    g_string_printf(name, "virtio-pci-device-%s", vdev_name);
    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops, proxy, name->str,
                          proxy->device.size);

    g_string_printf(name, "virtio-pci-notify-%s", vdev_name);
    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops, proxy, name->str,
                          proxy->notify.size);

    g_string_printf(name, "virtio-pci-notify-pio-%s", vdev_name);
    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops, proxy, name->str,
                          proxy->notify_pio.size);
}
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}
static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}
static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}
static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuses guests
     */
    if (!proxy->ignore_backend_features &&
            !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (!virtio_legacy_allowed(vdev)) {
            /*
             * To avoid migration issues, we allow legacy mode when legacy
             * check is disabled in the old machine types (< 5.1).
             */
            if (virtio_legacy_check_disabled(vdev)) {
                warn_report("device is modern-only, but for backward "
                            "compatibility legacy is allowed");
            } else {
                error_setg(errp,
                           "device is modern-only, use disable-legacy=on");
                return;
            }
        }
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
        if (proxy->trans_devid) {
            pci_config_set_device_id(config, proxy->trans_devid);
        }
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     PCI_DEVICE_ID_VIRTIO_10_BASE + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy, vdev->name);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}
static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}
*pci_dev
, Error
**errp
)
2094 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(pci_dev
);
2095 VirtioPCIClass
*k
= VIRTIO_PCI_GET_CLASS(pci_dev
);
2096 bool pcie_port
= pci_bus_is_express(pci_get_bus(pci_dev
)) &&
2097 !pci_bus_is_root(pci_get_bus(pci_dev
));
2099 if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
2100 proxy
->flags
&= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD
;
2103 /* fd-based ioevents can't be synchronized in record/replay */
2104 if (replay_mode
!= REPLAY_MODE_NONE
) {
2105 proxy
->flags
&= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD
;
2109 * virtio pci bar layout used by default.
2110 * subclasses can re-arrange things if needed.
2112 * region 0 -- virtio legacy io bar
2113 * region 1 -- msi-x bar
2114 * region 2 -- virtio modern io bar (off by default)
2115 * region 4+5 -- virtio modern memory (64bit) bar
2118 proxy
->legacy_io_bar_idx
= 0;
2119 proxy
->msix_bar_idx
= 1;
2120 proxy
->modern_io_bar_idx
= 2;
2121 proxy
->modern_mem_bar_idx
= 4;
2123 proxy
->common
.offset
= 0x0;
2124 proxy
->common
.size
= 0x1000;
2125 proxy
->common
.type
= VIRTIO_PCI_CAP_COMMON_CFG
;
2127 proxy
->isr
.offset
= 0x1000;
2128 proxy
->isr
.size
= 0x1000;
2129 proxy
->isr
.type
= VIRTIO_PCI_CAP_ISR_CFG
;
2131 proxy
->device
.offset
= 0x2000;
2132 proxy
->device
.size
= 0x1000;
2133 proxy
->device
.type
= VIRTIO_PCI_CAP_DEVICE_CFG
;
2135 proxy
->notify
.offset
= 0x3000;
2136 proxy
->notify
.size
= virtio_pci_queue_mem_mult(proxy
) * VIRTIO_QUEUE_MAX
;
2137 proxy
->notify
.type
= VIRTIO_PCI_CAP_NOTIFY_CFG
;
2139 proxy
->notify_pio
.offset
= 0x0;
2140 proxy
->notify_pio
.size
= 0x4;
2141 proxy
->notify_pio
.type
= VIRTIO_PCI_CAP_NOTIFY_CFG
;
2143 /* subclasses can enforce modern, so do this unconditionally */
2144 memory_region_init(&proxy
->modern_bar
, OBJECT(proxy
), "virtio-pci",
2145 /* PCI BAR regions must be powers of 2 */
2146 pow2ceil(proxy
->notify
.offset
+ proxy
->notify
.size
));
2148 if (proxy
->disable_legacy
== ON_OFF_AUTO_AUTO
) {
2149 proxy
->disable_legacy
= pcie_port
? ON_OFF_AUTO_ON
: ON_OFF_AUTO_OFF
;
2152 if (!virtio_pci_modern(proxy
) && !virtio_pci_legacy(proxy
)) {
2153 error_setg(errp
, "device cannot work as neither modern nor legacy mode"
2155 error_append_hint(errp
, "Set either disable-modern or disable-legacy"
2160 if (pcie_port
&& pci_is_express(pci_dev
)) {
2162 uint16_t last_pcie_cap_offset
= PCI_CONFIG_SPACE_SIZE
;
2164 pos
= pcie_endpoint_cap_init(pci_dev
, 0);
2167 pos
= pci_add_capability(pci_dev
, PCI_CAP_ID_PM
, 0,
2168 PCI_PM_SIZEOF
, errp
);
2173 pci_dev
->exp
.pm_cap
= pos
;
2176 * Indicates that this function complies with revision 1.2 of the
2177 * PCI Power Management Interface Specification.
2179 pci_set_word(pci_dev
->config
+ pos
+ PCI_PM_PMC
, 0x3);
2181 if (proxy
->flags
& VIRTIO_PCI_FLAG_AER
) {
2182 pcie_aer_init(pci_dev
, PCI_ERR_VER
, last_pcie_cap_offset
,
2183 PCI_ERR_SIZEOF
, NULL
);
2184 last_pcie_cap_offset
+= PCI_ERR_SIZEOF
;
2187 if (proxy
->flags
& VIRTIO_PCI_FLAG_INIT_DEVERR
) {
2188 /* Init error enabling flags */
2189 pcie_cap_deverr_init(pci_dev
);
2192 if (proxy
->flags
& VIRTIO_PCI_FLAG_INIT_LNKCTL
) {
2193 /* Init Link Control Register */
2194 pcie_cap_lnkctl_init(pci_dev
);
2197 if (proxy
->flags
& VIRTIO_PCI_FLAG_INIT_PM
) {
2198 /* Init Power Management Control Register */
2199 pci_set_word(pci_dev
->wmask
+ pos
+ PCI_PM_CTRL
,
2200 PCI_PM_CTRL_STATE_MASK
);
2203 if (proxy
->flags
& VIRTIO_PCI_FLAG_ATS
) {
2204 pcie_ats_init(pci_dev
, last_pcie_cap_offset
,
2205 proxy
->flags
& VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED
);
2206 last_pcie_cap_offset
+= PCI_EXT_CAP_ATS_SIZEOF
;
2209 if (proxy
->flags
& VIRTIO_PCI_FLAG_INIT_FLR
) {
2210 /* Set Function Level Reset capability bit */
2211 pcie_cap_flr_init(pci_dev
);
2215 * make future invocations of pci_is_express() return false
2216 * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
2218 pci_dev
->cap_present
&= ~QEMU_PCI_CAP_EXPRESS
;
2221 virtio_pci_bus_new(&proxy
->bus
, sizeof(proxy
->bus
), proxy
);
2223 k
->realize(proxy
, errp
);
static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    msix_uninit_exclusive_bar(pci_dev);
    if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port &&
        pci_is_express(pci_dev)) {
        pcie_aer_exit(pci_dev);
    }
}
static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    int i;

    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].reset = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }
}
static void virtio_pci_bus_reset_hold(Object *obj)
{
    PCIDevice *dev = PCI_DEVICE(obj);
    DeviceState *qdev = DEVICE(obj);

    virtio_pci_reset(qdev);

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}
2274 static Property virtio_pci_properties
[] = {
2275 DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy
, flags
,
2276 VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT
, false),
2277 DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy
, flags
,
2278 VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT
, true),
2279 DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy
, flags
,
2280 VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT
, false),
2281 DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy
, flags
,
2282 VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT
, false),
2283 DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy
, flags
,
2284 VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT
, false),
2285 DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy
,
2286 ignore_backend_features
, false),
2287 DEFINE_PROP_BIT("ats", VirtIOPCIProxy
, flags
,
2288 VIRTIO_PCI_FLAG_ATS_BIT
, false),
2289 DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy
, flags
,
2290 VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT
, true),
2291 DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy
, flags
,
2292 VIRTIO_PCI_FLAG_INIT_DEVERR_BIT
, true),
2293 DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy
, flags
,
2294 VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT
, true),
2295 DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy
, flags
,
2296 VIRTIO_PCI_FLAG_INIT_PM_BIT
, true),
2297 DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy
, flags
,
2298 VIRTIO_PCI_FLAG_INIT_FLR_BIT
, true),
2299 DEFINE_PROP_BIT("aer", VirtIOPCIProxy
, flags
,
2300 VIRTIO_PCI_FLAG_AER_BIT
, false),
2301 DEFINE_PROP_END_OF_LIST(),
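
/*
 * These flags are regular qdev properties of every virtio PCI proxy, so they
 * can be set per device on the command line. An illustrative (hypothetical)
 * invocation enabling Address Translation Services and Advanced Error
 * Reporting on a virtio-net device:
 *
 *   -device virtio-net-pci,ats=on,aer=on
 */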
static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    rc->phases.hold = virtio_pci_bus_reset_hold;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};
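
/*
 * disable-legacy/disable-modern select the transitional behaviour of the
 * generic "<device>-pci" types. For example (illustrative only), forcing a
 * transitional device that exposes both the legacy and the modern interface:
 *
 *   -device virtio-blk-pci,disable-legacy=off,disable-modern=false
 */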
static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}

void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    g_free(base_name);
}
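
/*
 * A caller typically fills in a VirtioPCIDeviceTypeInfo and hands it to
 * virtio_pci_types_register() from its type_init() hook. A minimal sketch
 * (names are illustrative, not taken from this file):
 *
 *   static const VirtioPCIDeviceTypeInfo virtio_foo_pci_info = {
 *       .base_name              = TYPE_VIRTIO_FOO_PCI,
 *       .generic_name           = "virtio-foo-pci",
 *       .transitional_name      = "virtio-foo-pci-transitional",
 *       .non_transitional_name  = "virtio-foo-pci-non-transitional",
 *       .instance_size = sizeof(VirtIOFooPCI),
 *       .instance_init = virtio_foo_pci_instance_init,
 *       .class_init    = virtio_foo_pci_class_init,
 *   };
 *
 *   static void virtio_foo_pci_register(void)
 *   {
 *       virtio_pci_types_register(&virtio_foo_pci_info);
 *   }
 *   type_init(virtio_foo_pci_register)
 */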
unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues)
{
    /*
     * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted
     * virtqueue buffers can handle their completion. When a different vCPU
     * handles completion it may need to IPI the vCPU that submitted the
     * request and this adds overhead.
     *
     * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in
     * guests with very many vCPUs and a device that is only used by a few
     * vCPUs. Unfortunately optimizing that case requires manual pinning inside
     * the guest, so those users might as well manually set the number of
     * queues. There is no upper limit that can be applied automatically and
     * doing so arbitrarily would result in a sudden performance drop once the
     * threshold number of vCPUs is exceeded.
     */
    unsigned num_queues = current_machine->smp.cpus;

    /*
     * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but the
     * config change interrupt and the fixed virtqueues must be taken into
     * account too.
     */
    num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues);

    /*
     * There is a limit to how many virtqueues a device can have.
     */
    return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues);
}
/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->iommu_enabled = virtio_pci_iommu_enabled;
    k->queue_enabled = virtio_pci_queue_enabled;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_size    = sizeof(VirtioPCIBusClass),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)