/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"

#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/boards.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "hw/virtio/virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"
#include "sysemu/replay.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}
static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        if (vector != VIRTIO_NO_VECTOR) {
            msix_notify(&proxy->pci_dev, vector);
        }
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1);
    }
}
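/*
 * Note: with MSI-X enabled the notification above goes straight to the
 * per-vector MSI-X message; otherwise the legacy INTx line simply mirrors
 * bit 0 of the ISR register, which the guest later reads and clears.
 */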
static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, vdev->config_vector);
}
static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};
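/*
 * The modern (virtio 1.0) proxy state above travels as a migration
 * subsection, so it is only transferred when the device is actually
 * operating in modern mode (see virtio_pci_modern_state_needed()).
 */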
static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}
static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
}
static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;
    int ret;

    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);

        if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) {
            return -EINVAL;
        }
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    vdev->config_vector = vector;
    if (vector != VIRTIO_NO_VECTOR) {
        msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}
static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
        if (vector != VIRTIO_NO_VECTOR && vector >= proxy->nvectors) {
            return -EINVAL;
        }
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}
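/*
 * Both load paths above sanity-check the MSI-X vector coming from the
 * migration stream: anything outside the vectors this proxy allocated is
 * rejected rather than being blindly programmed into MSI-X.
 */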
typedef struct VirtIOPCIIDInfo {
    /* virtio id */
    uint16_t vdev_id;
    /* pci device id for the transitional device */
    uint16_t trans_devid;
    uint16_t class_id;
} VirtIOPCIIDInfo;

static const VirtIOPCIIDInfo virtio_pci_id_info[] = {
    {
        .vdev_id = VIRTIO_ID_CRYPTO,
        .class_id = PCI_CLASS_OTHERS,
    }, {
        .vdev_id = VIRTIO_ID_FS,
        .class_id = PCI_CLASS_STORAGE_OTHER,
    }, {
        .vdev_id = VIRTIO_ID_NET,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_NET,
        .class_id = PCI_CLASS_NETWORK_ETHERNET,
    }, {
        .vdev_id = VIRTIO_ID_BLOCK,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_BLOCK,
        .class_id = PCI_CLASS_STORAGE_SCSI,
    }, {
        .vdev_id = VIRTIO_ID_CONSOLE,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_CONSOLE,
        .class_id = PCI_CLASS_COMMUNICATION_OTHER,
    }, {
        .vdev_id = VIRTIO_ID_SCSI,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_SCSI,
        .class_id = PCI_CLASS_STORAGE_SCSI
    }, {
        .vdev_id = VIRTIO_ID_9P,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_9P,
        .class_id = PCI_BASE_CLASS_NETWORK,
    }, {
        .vdev_id = VIRTIO_ID_BALLOON,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_BALLOON,
        .class_id = PCI_CLASS_OTHERS,
    }, {
        .vdev_id = VIRTIO_ID_RNG,
        .trans_devid = PCI_DEVICE_ID_VIRTIO_RNG,
        .class_id = PCI_CLASS_OTHERS,
    },
};
static const VirtIOPCIIDInfo *virtio_pci_get_id_info(uint16_t vdev_id)
{
    const VirtIOPCIIDInfo *info = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(virtio_pci_id_info); i++) {
        if (virtio_pci_id_info[i].vdev_id == vdev_id) {
            info = &virtio_pci_id_info[i];
            break;
        }
    }

    if (!info) {
        /* The device id is invalid or not added to the id_info yet. */
        error_report("Invalid virtio device(id %u)", vdev_id);
        abort();
    }

    return info;
}

/*
 * Get the Transitional Device ID for the specific device, return
 * zero if the device is non-transitional.
 */
uint16_t virtio_pci_get_trans_devid(uint16_t device_id)
{
    return virtio_pci_get_id_info(device_id)->trans_devid;
}

/*
 * Get the Class ID for the specific device.
 */
uint16_t virtio_pci_get_class_id(uint16_t device_id)
{
    return virtio_pci_get_id_info(device_id)->class_id;
}
static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}
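/*
 * Queue notify areas are normally packed 4 bytes apart; with
 * VIRTIO_PCI_FLAG_PAGE_PER_VQ each queue instead gets its own 4 KiB page,
 * which is what lets a backend-provided host notifier region be mapped on
 * top of an individual queue's doorbell (see virtio_pci_set_host_notifier_mr).
 */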
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                      false, n, notifier);
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                      false, n, notifier);
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}
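/*
 * The ioeventfd is wired to whichever notify regions the guest can actually
 * use: the modern MMIO notify area (offset scaled per queue), optionally the
 * modern PIO notify port, and/or the legacy QUEUE_NOTIFY ioport.
 */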
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        if (vdev->config_vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        }
        /* Make it possible for guest to discover an error took place. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        vector = virtio_queue_vector(vdev, vdev->queue_sel);
        if (vector != VIRTIO_NO_VECTOR) {
            msix_vector_unuse(&proxy->pci_dev, vector);
        }
        /* Make it possible for guest to discover an error took place. */
        if (val < proxy->nvectors) {
            msix_vector_use(&proxy->pci_dev, val);
        } else {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: unexpected address 0x%x value 0x%x\n",
                      __func__, addr, val);
        break;
    }
}
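/*
 * The register block above implements the legacy (virtio 0.9.x) I/O port
 * interface; the modern virtio-1 transport instead uses the capability
 * described MMIO regions handled further down in this file.
 */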
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = qatomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}
static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}
static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            MemoryRegionSection mrs = memory_region_find(&reg->mr,
                                        *off - reg->offset, len);
            assert(mrs.mr);
            *off = mrs.offset_within_region;
            memory_region_unref(mrs.mr);
            return mrs.mr;
        }
    }

    return NULL;
}
/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}
static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}
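/*
 * These two helpers back the VIRTIO_PCI_CAP_PCI_CFG access method: the guest
 * programs cap.offset/cap.length through PCI config space and the data
 * window is then copied to/from the device BAR via the lookup above.
 */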
static void virtio_pci_ats_ctrl_trigger(PCIDevice *pci_dev, bool enable)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    vdev->device_iotlb_enabled = enable;

    if (k->toggle_device_iotlb) {
        k->toggle_device_iotlb(vdev);
    }
}
static void pcie_ats_config_write(PCIDevice *dev, uint32_t address,
                                  uint32_t val, int len)
{
    uint32_t off;
    uint16_t ats_cap = dev->exp.ats_cap;

    if (!ats_cap || address < ats_cap) {
        return;
    }
    off = address - ats_cap;
    if (off >= PCI_EXT_CAP_ATS_SIZEOF) {
        return;
    }

    if (range_covers_byte(off, len, PCI_ATS_CTRL + 1)) {
        virtio_pci_ats_ctrl_trigger(dev, !!(val & PCI_ATS_CTRL_ENABLE));
    }
}
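/*
 * PCI_ATS_CTRL + 1 is the config byte holding the ATS Enable bit, so any
 * config write touching it re-evaluates whether the device IOTLB should be
 * enabled for this device.
 */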
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
        pcie_ats_config_write(pci_dev, address, val, len);
    }

    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t caplen;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        caplen = le32_to_cpu(cfg->cap.length);

        if (caplen == 1 || caplen == 2 || caplen == 4) {
            assert(caplen <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, caplen);
        }
    }
}
static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t caplen;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        caplen = le32_to_cpu(cfg->cap.length);

        if (caplen == 1 || caplen == 2 || caplen == 4) {
            assert(caplen <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, caplen);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
        ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        kvm_irqchip_commit_route_changes(&c);
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}
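/*
 * MSI routes are reference counted per vector: the first user of a vector
 * allocates the KVM route, later users sharing the vector just bump
 * irqfd->users, and the route is only released once the count drops to 0.
 */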
static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    EventNotifier *n,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         EventNotifier *n,
                                         unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}
static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
                                   EventNotifier **n, unsigned int *vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq;

    if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
        *n = virtio_config_get_guest_notifier(vdev);
        *vector = vdev->config_vector;
    } else {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            return -1;
        }
        *vector = virtio_queue_vector(vdev, queue_no);
        vq = virtio_get_queue(vdev, queue_no);
        *n = virtio_queue_get_guest_notifier(vq);
    }
    return 0;
}
static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
{
    unsigned int vector;
    int ret;
    EventNotifier *n;
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return ret;
    }
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return 0;
    }
    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
    if (ret < 0) {
        goto undo;
    }
    /*
     * If guest supports masking, set up irqfd now.
     * Otherwise, delay until unmasked in the frontend.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
        if (ret < 0) {
            kvm_virtio_pci_vq_vector_release(proxy, vector);
            goto undo;
        }
    }

    return 0;
undo:
    vector = virtio_queue_vector(vdev, queue_no);
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return ret;
    }
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
        if (ret < 0) {
            return ret;
        }
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
    return ret;
}
static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs)
{
    int queue_no;
    int ret = 0;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            return -1;
        }
        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
    }
    return ret;
}

static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
{
    return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
}
static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
                                              int queue_no)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    EventNotifier *n;
    int ret;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    PCIDevice *dev = &proxy->pci_dev;

    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
    if (ret < 0) {
        return;
    }
    if (vector >= msix_nr_vectors_allocated(dev)) {
        return;
    }
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
    kvm_virtio_pci_vq_vector_release(proxy, vector);
}
static void kvm_virtio_pci_vector_vq_release(VirtIOPCIProxy *proxy, int nvqs)
{
    int queue_no;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        kvm_virtio_pci_vector_release_one(proxy, queue_no);
    }
}

static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
{
    kvm_virtio_pci_vector_release_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
}
static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg,
                                        EventNotifier *n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
    }

    return ret;
}
static void virtio_pci_one_vector_mask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       EventNotifier *n)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, n, vector);
    }
}
static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    EventNotifier *n;
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            n = virtio_queue_get_guest_notifier(vq);
            ret = virtio_pci_one_vector_unmask(proxy, index, vector, msg, n);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    /* unmask config intr */
    if (vector == vdev->config_vector) {
        n = virtio_config_get_guest_notifier(vdev);
        ret = virtio_pci_one_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector,
                                           msg, n);
        if (ret < 0) {
            goto undo_config;
        }
    }
    return 0;

undo_config:
    n = virtio_config_get_guest_notifier(vdev);
    virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            n = virtio_queue_get_guest_notifier(vq);
            virtio_pci_one_vector_mask(proxy, index, vector, n);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}
static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    EventNotifier *n;
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        n = virtio_queue_get_guest_notifier(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_one_vector_mask(proxy, index, vector, n);
        }
        vq = virtio_vector_next_queue(vq);
    }

    if (vector == vdev->config_vector) {
        n = virtio_config_get_guest_notifier(vdev);
        virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
    }
}
static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    int ret;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        ret = virtio_pci_get_notifier(proxy, queue_no, &notifier, &vector);
        if (ret < 0) {
            break;
        }
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
    /* poll the config intr */
    ret = virtio_pci_get_notifier(proxy, VIRTIO_CONFIG_IRQ_IDX, &notifier,
                                  &vector);
    if (ret < 0) {
        return;
    }
    if (vector < vector_start || vector >= vector_end ||
        !msix_is_masked(dev, vector)) {
        return;
    }
    if (k->guest_notifier_pending) {
        if (k->guest_notifier_pending(vdev, VIRTIO_CONFIG_IRQ_IDX)) {
            msix_set_pending(dev, vector);
        }
    } else if (event_notifier_test_and_clear(notifier)) {
        msix_set_pending(dev, vector);
    }
}
void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue *vq,
                                              int n, bool assign,
                                              bool with_irqfd)
{
    if (n == VIRTIO_CONFIG_IRQ_IDX) {
        virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, assign, with_irqfd);
    }
}
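/*
 * VIRTIO_CONFIG_IRQ_IDX is used throughout this file as a pseudo queue
 * index meaning "the config change interrupt" rather than a real virtqueue.
 */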
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = NULL;
    EventNotifier *notifier = NULL;

    if (n == VIRTIO_CONFIG_IRQ_IDX) {
        notifier = virtio_config_get_guest_notifier(vdev);
    } else {
        vq = virtio_get_queue(vdev, n);
        notifier = virtio_queue_get_guest_notifier(vq);
    }

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, true, with_irqfd);
    } else {
        virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, false,
                                                 with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return msix_enabled(&proxy->pci_dev);
}
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /*
     * When deassigning, pass a consistent nvqs value to avoid leaking
     * notifiers. But first check we've actually been configured, exit
     * early if we haven't.
     */
    if (!assign && !proxy->nvqs_with_notifiers) {
        return 0;
    }
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd ||
         (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) &&
        !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_vq_release(proxy, nvqs);
            kvm_virtio_pci_vector_config_release(proxy);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }
    r = virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, assign,
                                      with_irqfd);
    if (r < 0) {
        goto config_assign_error;
    }
    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd ||
         (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) &&
        assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_vq_use(proxy, nvqs);
            if (r < 0) {
                goto config_assign_error;
            }
            r = kvm_virtio_pci_vector_config_use(proxy);
            if (r < 0) {
                goto config_error;
            }
        }

        r = msix_set_vector_notifiers(&proxy->pci_dev, virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_vq_release(proxy, nvqs);
    }
config_error:
    if (with_irqfd) {
        kvm_virtio_pci_vector_config_release(proxy);
    }
config_assign_error:
    virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, !assign,
                                  with_irqfd);
assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    g_free(proxy->vector_irqfd);
    proxy->vector_irqfd = NULL;
    return r;
}
static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}
*d
, bool running
)
1347 VirtIOPCIProxy
*proxy
= to_virtio_pci_proxy(d
);
1348 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1351 /* Old QEMU versions did not set bus master enable on status write.
1352 * Detect DRIVER set and enable it.
1354 if ((proxy
->flags
& VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION
) &&
1355 (vdev
->status
& VIRTIO_CONFIG_S_DRIVER
) &&
1356 !(proxy
->pci_dev
.config
[PCI_COMMAND
] & PCI_COMMAND_MASTER
)) {
1357 pci_default_write_config(&proxy
->pci_dev
, PCI_COMMAND
,
1358 proxy
->pci_dev
.config
[PCI_COMMAND
] |
1359 PCI_COMMAND_MASTER
, 1);
1361 virtio_pci_start_ioeventfd(proxy
);
1363 virtio_pci_stop_ioeventfd(proxy
);
1368 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
1371 static int virtio_pci_query_nvectors(DeviceState
*d
)
1373 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(d
);
1375 return proxy
->nvectors
;
1378 static AddressSpace
*virtio_pci_get_dma_as(DeviceState
*d
)
1380 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(d
);
1381 PCIDevice
*dev
= &proxy
->pci_dev
;
1383 return pci_get_address_space(dev
);
1386 static bool virtio_pci_iommu_enabled(DeviceState
*d
)
1388 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(d
);
1389 PCIDevice
*dev
= &proxy
->pci_dev
;
1390 AddressSpace
*dma_as
= pci_device_iommu_address_space(dev
);
1392 if (dma_as
== &address_space_memory
) {
1399 static bool virtio_pci_queue_enabled(DeviceState
*d
, int n
)
1401 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(d
);
1402 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1404 if (virtio_vdev_has_feature(vdev
, VIRTIO_F_VERSION_1
)) {
1405 return proxy
->vqs
[n
].enabled
;
1408 return virtio_queue_enabled_legacy(vdev
, n
);
1411 static int virtio_pci_add_mem_cap(VirtIOPCIProxy
*proxy
,
1412 struct virtio_pci_cap
*cap
)
1414 PCIDevice
*dev
= &proxy
->pci_dev
;
1417 offset
= pci_add_capability(dev
, PCI_CAP_ID_VNDR
, 0,
1418 cap
->cap_len
, &error_abort
);
1420 assert(cap
->cap_len
>= sizeof *cap
);
1421 memcpy(dev
->config
+ offset
+ PCI_CAP_FLAGS
, &cap
->cap_len
,
1422 cap
->cap_len
- PCI_CAP_FLAGS
);
1427 int virtio_pci_add_shm_cap(VirtIOPCIProxy
*proxy
,
1428 uint8_t bar
, uint64_t offset
, uint64_t length
,
1431 struct virtio_pci_cap64 cap
= {
1432 .cap
.cap_len
= sizeof cap
,
1433 .cap
.cfg_type
= VIRTIO_PCI_CAP_SHARED_MEMORY_CFG
,
1437 cap
.cap
.length
= cpu_to_le32(length
);
1438 cap
.length_hi
= cpu_to_le32(length
>> 32);
1439 cap
.cap
.offset
= cpu_to_le32(offset
);
1440 cap
.offset_hi
= cpu_to_le32(offset
>> 32);
1442 return virtio_pci_add_mem_cap(proxy
, &cap
.cap
);
1445 /* Called within call_rcu(). */
1446 static void bitmap_free_region_cache(BitmapMemoryRegionCaches
*caches
)
1448 assert(caches
!= NULL
);
1449 address_space_cache_destroy(&caches
->bitmap
);
1453 static void lm_disable(VirtIODevice
*vdev
)
1455 BitmapMemoryRegionCaches
*caches
;
1456 caches
= qatomic_read(&vdev
->caches
);
1457 qatomic_rcu_set(&vdev
->caches
, NULL
);
1459 call_rcu(caches
, bitmap_free_region_cache
, rcu
);
1463 static void lm_enable(VirtIODevice
*vdev
)
1465 BitmapMemoryRegionCaches
*old
= vdev
->caches
;
1466 BitmapMemoryRegionCaches
*new = NULL
;
1467 hwaddr addr
, end
, size
;
1470 addr
= vdev
->lm_base_addr_low
| ((hwaddr
)(vdev
->lm_base_addr_high
) << 32);
1471 end
= vdev
->lm_end_addr_low
| ((hwaddr
)(vdev
->lm_end_addr_high
) << 32);
1474 error_report("Invalid lm size.");
1478 new = g_new0(BitmapMemoryRegionCaches
, 1);
1479 len
= address_space_cache_init(&new->bitmap
, vdev
->dma_as
, addr
, size
,
1482 virtio_error(vdev
, "Cannot map bitmap");
1485 qatomic_rcu_set(&vdev
->caches
, new);
1488 call_rcu(old
, bitmap_free_region_cache
, rcu
);
1494 address_space_cache_destroy(&new->bitmap
);
1498 static uint64_t virtio_pci_lm_read(void *opaque
, hwaddr addr
,
1501 VirtIOPCIProxy
*proxy
= opaque
;
1502 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1503 hwaddr offset_end
= LM_VRING_STATE_OFFSET
+
1504 virtio_pci_queue_mem_mult(proxy
) * VIRTIO_QUEUE_MAX
;
1512 case LM_LOGGING_CTRL
:
1513 val
= vdev
->lm_logging_ctrl
;
1515 case LM_BASE_ADDR_LOW
:
1516 val
= vdev
->lm_base_addr_low
;
1518 case LM_BASE_ADDR_HIGH
:
1519 val
= vdev
->lm_base_addr_high
;
1521 case LM_END_ADDR_LOW
:
1522 val
= vdev
->lm_end_addr_low
;
1524 case LM_END_ADDR_HIGH
:
1525 val
= vdev
->lm_end_addr_high
;
1528 if (addr
>= LM_VRING_STATE_OFFSET
&& addr
<= offset_end
) {
1529 qid
= (addr
- LM_VRING_STATE_OFFSET
) /
1530 virtio_pci_queue_mem_mult(proxy
);
1531 val
= virtio_queue_get_vring_states(vdev
, qid
);
1541 static void virtio_pci_lm_write(void *opaque
, hwaddr addr
,
1542 uint64_t val
, unsigned size
)
1544 VirtIOPCIProxy
*proxy
= opaque
;
1545 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1546 hwaddr offset_end
= LM_VRING_STATE_OFFSET
+
1547 virtio_pci_queue_mem_mult(proxy
) * VIRTIO_QUEUE_MAX
;
1555 case LM_LOGGING_CTRL
:
1556 vdev
->lm_logging_ctrl
= val
;
1565 virtio_error(vdev
, "Unsupport LM_LOGGING_CTRL value: %"PRIx64
,
1571 case LM_BASE_ADDR_LOW
:
1572 vdev
->lm_base_addr_low
= val
;
1574 case LM_BASE_ADDR_HIGH
:
1575 vdev
->lm_base_addr_high
= val
;
1577 case LM_END_ADDR_LOW
:
1578 vdev
->lm_end_addr_low
= val
;
1580 case LM_END_ADDR_HIGH
:
1581 vdev
->lm_end_addr_high
= val
;
1584 if (addr
>= LM_VRING_STATE_OFFSET
&& addr
<= offset_end
) {
1585 qid
= (addr
- LM_VRING_STATE_OFFSET
) /
1586 virtio_pci_queue_mem_mult(proxy
);
1587 virtio_queue_set_vring_states(vdev
, qid
, val
);
1589 virtio_error(vdev
, "Unsupport addr: %"PRIx64
, addr
);
1594 static uint64_t virtio_pci_common_read(void *opaque
, hwaddr addr
,
1597 VirtIOPCIProxy
*proxy
= opaque
;
1598 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1607 case VIRTIO_PCI_COMMON_DFSELECT
:
1608 val
= proxy
->dfselect
;
1610 case VIRTIO_PCI_COMMON_DF
:
1611 if (proxy
->dfselect
<= 1) {
1612 VirtioDeviceClass
*vdc
= VIRTIO_DEVICE_GET_CLASS(vdev
);
1614 val
= (vdev
->host_features
& ~vdc
->legacy_features
) >>
1615 (32 * proxy
->dfselect
);
1618 case VIRTIO_PCI_COMMON_GFSELECT
:
1619 val
= proxy
->gfselect
;
1621 case VIRTIO_PCI_COMMON_GF
:
1622 if (proxy
->gfselect
< ARRAY_SIZE(proxy
->guest_features
)) {
1623 val
= proxy
->guest_features
[proxy
->gfselect
];
1626 case VIRTIO_PCI_COMMON_MSIX
:
1627 val
= vdev
->config_vector
;
1629 case VIRTIO_PCI_COMMON_NUMQ
:
1630 for (i
= 0; i
< VIRTIO_QUEUE_MAX
; ++i
) {
1631 if (virtio_queue_get_num(vdev
, i
)) {
1636 case VIRTIO_PCI_COMMON_STATUS
:
1639 case VIRTIO_PCI_COMMON_CFGGENERATION
:
1640 val
= vdev
->generation
;
1642 case VIRTIO_PCI_COMMON_Q_SELECT
:
1643 val
= vdev
->queue_sel
;
1645 case VIRTIO_PCI_COMMON_Q_SIZE
:
1646 val
= virtio_queue_get_num(vdev
, vdev
->queue_sel
);
1648 case VIRTIO_PCI_COMMON_Q_MSIX
:
1649 val
= virtio_queue_vector(vdev
, vdev
->queue_sel
);
1651 case VIRTIO_PCI_COMMON_Q_ENABLE
:
1652 val
= proxy
->vqs
[vdev
->queue_sel
].enabled
;
1654 case VIRTIO_PCI_COMMON_Q_NOFF
:
1655 /* Simply map queues in order */
1656 val
= vdev
->queue_sel
;
1658 case VIRTIO_PCI_COMMON_Q_DESCLO
:
1659 val
= proxy
->vqs
[vdev
->queue_sel
].desc
[0];
1661 case VIRTIO_PCI_COMMON_Q_DESCHI
:
1662 val
= proxy
->vqs
[vdev
->queue_sel
].desc
[1];
1664 case VIRTIO_PCI_COMMON_Q_AVAILLO
:
1665 val
= proxy
->vqs
[vdev
->queue_sel
].avail
[0];
1667 case VIRTIO_PCI_COMMON_Q_AVAILHI
:
1668 val
= proxy
->vqs
[vdev
->queue_sel
].avail
[1];
1670 case VIRTIO_PCI_COMMON_Q_USEDLO
:
1671 val
= proxy
->vqs
[vdev
->queue_sel
].used
[0];
1673 case VIRTIO_PCI_COMMON_Q_USEDHI
:
1674 val
= proxy
->vqs
[vdev
->queue_sel
].used
[1];
1676 case VIRTIO_PCI_COMMON_Q_RESET
:
1677 val
= proxy
->vqs
[vdev
->queue_sel
].reset
;
1686 static void virtio_pci_common_write(void *opaque
, hwaddr addr
,
1687 uint64_t val
, unsigned size
)
1689 VirtIOPCIProxy
*proxy
= opaque
;
1690 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1698 case VIRTIO_PCI_COMMON_DFSELECT
:
1699 proxy
->dfselect
= val
;
1701 case VIRTIO_PCI_COMMON_GFSELECT
:
1702 proxy
->gfselect
= val
;
1704 case VIRTIO_PCI_COMMON_GF
:
1705 if (proxy
->gfselect
< ARRAY_SIZE(proxy
->guest_features
)) {
1706 proxy
->guest_features
[proxy
->gfselect
] = val
;
1707 virtio_set_features(vdev
,
1708 (((uint64_t)proxy
->guest_features
[1]) << 32) |
1709 proxy
->guest_features
[0]);
1712 case VIRTIO_PCI_COMMON_MSIX
:
1713 if (vdev
->config_vector
!= VIRTIO_NO_VECTOR
) {
1714 msix_vector_unuse(&proxy
->pci_dev
, vdev
->config_vector
);
1716 /* Make it possible for guest to discover an error took place. */
1717 if (val
< proxy
->nvectors
) {
1718 msix_vector_use(&proxy
->pci_dev
, val
);
1720 val
= VIRTIO_NO_VECTOR
;
1722 vdev
->config_vector
= val
;
1724 case VIRTIO_PCI_COMMON_STATUS
:
1725 if (!(val
& VIRTIO_CONFIG_S_DRIVER_OK
)) {
1726 virtio_pci_stop_ioeventfd(proxy
);
1729 virtio_set_status(vdev
, val
& 0xFF);
1731 if (val
& VIRTIO_CONFIG_S_DRIVER_OK
) {
1732 virtio_pci_start_ioeventfd(proxy
);
1735 if (vdev
->status
== 0) {
1736 virtio_pci_reset(DEVICE(proxy
));
1740 case VIRTIO_PCI_COMMON_Q_SELECT
:
1741 if (val
< VIRTIO_QUEUE_MAX
) {
1742 vdev
->queue_sel
= val
;
1745 case VIRTIO_PCI_COMMON_Q_SIZE
:
1746 proxy
->vqs
[vdev
->queue_sel
].num
= val
;
1747 virtio_queue_set_num(vdev
, vdev
->queue_sel
,
1748 proxy
->vqs
[vdev
->queue_sel
].num
);
1749 virtio_init_region_cache(vdev
, vdev
->queue_sel
);
1751 case VIRTIO_PCI_COMMON_Q_MSIX
:
1752 vector
= virtio_queue_vector(vdev
, vdev
->queue_sel
);
1753 if (vector
!= VIRTIO_NO_VECTOR
) {
1754 msix_vector_unuse(&proxy
->pci_dev
, vector
);
1756 /* Make it possible for guest to discover an error took place. */
1757 if (val
< proxy
->nvectors
) {
1758 msix_vector_use(&proxy
->pci_dev
, val
);
1760 val
= VIRTIO_NO_VECTOR
;
1762 virtio_queue_set_vector(vdev
, vdev
->queue_sel
, val
);
1764 case VIRTIO_PCI_COMMON_Q_ENABLE
:
1766 virtio_queue_set_num(vdev
, vdev
->queue_sel
,
1767 proxy
->vqs
[vdev
->queue_sel
].num
);
1768 virtio_queue_set_rings(vdev
, vdev
->queue_sel
,
1769 ((uint64_t)proxy
->vqs
[vdev
->queue_sel
].desc
[1]) << 32 |
1770 proxy
->vqs
[vdev
->queue_sel
].desc
[0],
1771 ((uint64_t)proxy
->vqs
[vdev
->queue_sel
].avail
[1]) << 32 |
1772 proxy
->vqs
[vdev
->queue_sel
].avail
[0],
1773 ((uint64_t)proxy
->vqs
[vdev
->queue_sel
].used
[1]) << 32 |
1774 proxy
->vqs
[vdev
->queue_sel
].used
[0]);
1775 proxy
->vqs
[vdev
->queue_sel
].enabled
= 1;
1776 proxy
->vqs
[vdev
->queue_sel
].reset
= 0;
1777 virtio_queue_enable(vdev
, vdev
->queue_sel
);
1779 virtio_error(vdev
, "wrong value for queue_enable %"PRIx64
, val
);
1782 case VIRTIO_PCI_COMMON_Q_DESCLO
:
1783 proxy
->vqs
[vdev
->queue_sel
].desc
[0] = val
;
1785 case VIRTIO_PCI_COMMON_Q_DESCHI
:
1786 proxy
->vqs
[vdev
->queue_sel
].desc
[1] = val
;
1788 case VIRTIO_PCI_COMMON_Q_AVAILLO
:
1789 proxy
->vqs
[vdev
->queue_sel
].avail
[0] = val
;
1791 case VIRTIO_PCI_COMMON_Q_AVAILHI
:
1792 proxy
->vqs
[vdev
->queue_sel
].avail
[1] = val
;
1794 case VIRTIO_PCI_COMMON_Q_USEDLO
:
1795 proxy
->vqs
[vdev
->queue_sel
].used
[0] = val
;
1797 case VIRTIO_PCI_COMMON_Q_USEDHI
:
1798 proxy
->vqs
[vdev
->queue_sel
].used
[1] = val
;
1800 case VIRTIO_PCI_COMMON_Q_RESET
:
1802 proxy
->vqs
[vdev
->queue_sel
].reset
= 1;
1804 virtio_queue_reset(vdev
, vdev
->queue_sel
);
1806 proxy
->vqs
[vdev
->queue_sel
].reset
= 0;
1807 proxy
->vqs
[vdev
->queue_sel
].enabled
= 0;
1816 static uint64_t virtio_pci_notify_read(void *opaque
, hwaddr addr
,
1819 VirtIOPCIProxy
*proxy
= opaque
;
1820 if (virtio_bus_get_device(&proxy
->bus
) == NULL
) {
1827 static void virtio_pci_notify_write(void *opaque
, hwaddr addr
,
1828 uint64_t val
, unsigned size
)
1830 VirtIOPCIProxy
*proxy
= opaque
;
1831 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1833 unsigned queue
= addr
/ virtio_pci_queue_mem_mult(proxy
);
1835 if (vdev
!= NULL
&& queue
< VIRTIO_QUEUE_MAX
) {
1836 trace_virtio_pci_notify_write(addr
, val
, size
);
1837 virtio_queue_notify(vdev
, queue
);
1841 static void virtio_pci_notify_write_pio(void *opaque
, hwaddr addr
,
1842 uint64_t val
, unsigned size
)
1844 VirtIOPCIProxy
*proxy
= opaque
;
1845 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1847 unsigned queue
= val
;
1849 if (vdev
!= NULL
&& queue
< VIRTIO_QUEUE_MAX
) {
1850 trace_virtio_pci_notify_write_pio(addr
, val
, size
);
1851 virtio_queue_notify(vdev
, queue
);
1855 static uint64_t virtio_pci_isr_read(void *opaque
, hwaddr addr
,
1858 VirtIOPCIProxy
*proxy
= opaque
;
1859 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1866 val
= qatomic_xchg(&vdev
->isr
, 0);
1867 pci_irq_deassert(&proxy
->pci_dev
);
1871 static void virtio_pci_isr_write(void *opaque
, hwaddr addr
,
1872 uint64_t val
, unsigned size
)
1876 static uint64_t virtio_pci_device_read(void *opaque
, hwaddr addr
,
1879 VirtIOPCIProxy
*proxy
= opaque
;
1880 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1889 val
= virtio_config_modern_readb(vdev
, addr
);
1892 val
= virtio_config_modern_readw(vdev
, addr
);
1895 val
= virtio_config_modern_readl(vdev
, addr
);
1904 static void virtio_pci_device_write(void *opaque
, hwaddr addr
,
1905 uint64_t val
, unsigned size
)
1907 VirtIOPCIProxy
*proxy
= opaque
;
1908 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
1916 virtio_config_modern_writeb(vdev
, addr
, val
);
1919 virtio_config_modern_writew(vdev
, addr
, val
);
1922 virtio_config_modern_writel(vdev
, addr
, val
);
1927 static void virtio_pci_modern_regions_init(VirtIOPCIProxy
*proxy
,
1928 const char *vdev_name
)
1930 static const MemoryRegionOps common_ops
= {
1931 .read
= virtio_pci_common_read
,
1932 .write
= virtio_pci_common_write
,
1934 .min_access_size
= 1,
1935 .max_access_size
= 4,
1937 .endianness
= DEVICE_LITTLE_ENDIAN
,
1939 static const MemoryRegionOps isr_ops
= {
1940 .read
= virtio_pci_isr_read
,
1941 .write
= virtio_pci_isr_write
,
1943 .min_access_size
= 1,
1944 .max_access_size
= 4,
1946 .endianness
= DEVICE_LITTLE_ENDIAN
,
1948 static const MemoryRegionOps device_ops
= {
1949 .read
= virtio_pci_device_read
,
1950 .write
= virtio_pci_device_write
,
1952 .min_access_size
= 1,
1953 .max_access_size
= 4,
1955 .endianness
= DEVICE_LITTLE_ENDIAN
,
1957 static const MemoryRegionOps notify_ops
= {
1958 .read
= virtio_pci_notify_read
,
1959 .write
= virtio_pci_notify_write
,
1961 .min_access_size
= 1,
1962 .max_access_size
= 4,
1964 .endianness
= DEVICE_LITTLE_ENDIAN
,
1966 static const MemoryRegionOps notify_pio_ops
= {
1967 .read
= virtio_pci_notify_read
,
1968 .write
= virtio_pci_notify_write_pio
,
1970 .min_access_size
= 1,
1971 .max_access_size
= 4,
1973 .endianness
= DEVICE_LITTLE_ENDIAN
,
1975 static const MemoryRegionOps lm_ops
= {
1976 .read
= virtio_pci_lm_read
,
1977 .write
= virtio_pci_lm_write
,
1979 .min_access_size
= 1,
1980 .max_access_size
= 4,
1982 .endianness
= DEVICE_LITTLE_ENDIAN
,
1984 g_autoptr(GString
) name
= g_string_new(NULL
);
1986 g_string_printf(name
, "virtio-pci-common-%s", vdev_name
);
1987 memory_region_init_io(&proxy
->common
.mr
, OBJECT(proxy
),
1991 proxy
->common
.size
);
1993 g_string_printf(name
, "virtio-pci-isr-%s", vdev_name
);
1994 memory_region_init_io(&proxy
->isr
.mr
, OBJECT(proxy
),
2000 g_string_printf(name
, "virtio-pci-device-%s", vdev_name
);
2001 memory_region_init_io(&proxy
->device
.mr
, OBJECT(proxy
),
2005 proxy
->device
.size
);
2007 g_string_printf(name
, "virtio-pci-notify-%s", vdev_name
);
2008 memory_region_init_io(&proxy
->notify
.mr
, OBJECT(proxy
),
2012 proxy
->notify
.size
);
2014 g_string_printf(name
, "virtio-pci-notify-pio-%s", vdev_name
);
2015 memory_region_init_io(&proxy
->notify_pio
.mr
, OBJECT(proxy
),
2019 proxy
->notify_pio
.size
);
2020 if (proxy
->flags
& VIRTIO_PCI_FLAG_VDPA
) {
2021 g_string_printf(name
, "virtio-pci-lm-%s", vdev_name
);
2022 memory_region_init_io(&proxy
->lm
.mr
, OBJECT(proxy
),
2030 static void virtio_pci_modern_region_map(VirtIOPCIProxy
*proxy
,
2031 VirtIOPCIRegion
*region
,
2032 struct virtio_pci_cap
*cap
,
2036 memory_region_add_subregion(mr
, region
->offset
, ®ion
->mr
);
2038 cap
->cfg_type
= region
->type
;
2040 cap
->offset
= cpu_to_le32(region
->offset
);
2041 cap
->length
= cpu_to_le32(region
->size
);
2042 virtio_pci_add_mem_cap(proxy
, cap
);
2046 static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy
*proxy
,
2047 VirtIOPCIRegion
*region
,
2048 struct virtio_pci_cap
*cap
)
2050 virtio_pci_modern_region_map(proxy
, region
, cap
,
2051 &proxy
->modern_bar
, proxy
->modern_mem_bar_idx
);
2054 static void virtio_pci_modern_io_region_map(VirtIOPCIProxy
*proxy
,
2055 VirtIOPCIRegion
*region
,
2056 struct virtio_pci_cap
*cap
)
2058 virtio_pci_modern_region_map(proxy
, region
, cap
,
2059 &proxy
->io_bar
, proxy
->modern_io_bar_idx
);
2062 static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy
*proxy
,
2063 VirtIOPCIRegion
*region
)
2065 memory_region_del_subregion(&proxy
->modern_bar
,
2069 static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy
*proxy
,
2070 VirtIOPCIRegion
*region
)
2072 memory_region_del_subregion(&proxy
->io_bar
,
2076 static void virtio_pci_pre_plugged(DeviceState
*d
, Error
**errp
)
2078 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(d
);
2079 VirtIODevice
*vdev
= virtio_bus_get_device(&proxy
->bus
);
2081 if (virtio_pci_modern(proxy
)) {
2082 virtio_add_feature(&vdev
->host_features
, VIRTIO_F_VERSION_1
);
2085 virtio_add_feature(&vdev
->host_features
, VIRTIO_F_BAD_FEATURE
);
2088 /* This is called by virtio-bus just after the device is plugged. */
2089 static void virtio_pci_device_plugged(DeviceState
*d
, Error
**errp
)
2091 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(d
);
2092 VirtioBusState
*bus
= &proxy
->bus
;
2093 bool legacy
= virtio_pci_legacy(proxy
);
2095 bool modern_pio
= proxy
->flags
& VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY
;
2098 VirtIODevice
*vdev
= virtio_bus_get_device(bus
);
2101 * Virtio capabilities present without
2102 * VIRTIO_F_VERSION_1 confuses guests
2104 if (!proxy
->ignore_backend_features
&&
2105 !virtio_has_feature(vdev
->host_features
, VIRTIO_F_VERSION_1
)) {
2106 virtio_pci_disable_modern(proxy
);
2109 error_setg(errp
, "Device doesn't support modern mode, and legacy"
2110 " mode is disabled");
2111 error_append_hint(errp
, "Set disable-legacy to off\n");
2117 modern
= virtio_pci_modern(proxy
);
2119 config
= proxy
->pci_dev
.config
;
2120 if (proxy
->class_code
) {
2121 pci_config_set_class(config
, proxy
->class_code
);
2125 if (!virtio_legacy_allowed(vdev
)) {
2127 * To avoid migration issues, we allow legacy mode when legacy
2128 * check is disabled in the old machine types (< 5.1).
2130 if (virtio_legacy_check_disabled(vdev
)) {
2131 warn_report("device is modern-only, but for backward "
2132 "compatibility legacy is allowed");
2135 "device is modern-only, use disable-legacy=on");
2139 if (virtio_host_has_feature(vdev
, VIRTIO_F_IOMMU_PLATFORM
)) {
2140 error_setg(errp
, "VIRTIO_F_IOMMU_PLATFORM was supported by"
2141 " neither legacy nor transitional device");
2145 * Legacy and transitional devices use specific subsystem IDs.
2146 * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
2147 * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
2149 pci_set_word(config
+ PCI_SUBSYSTEM_ID
, virtio_bus_get_vdev_id(bus
));
2150 if (proxy
->trans_devid
) {
2151 pci_config_set_device_id(config
, proxy
->trans_devid
);
2154 /* pure virtio-1.0 */
2155 pci_set_word(config
+ PCI_VENDOR_ID
,
2156 PCI_VENDOR_ID_REDHAT_QUMRANET
);
2157 pci_set_word(config
+ PCI_DEVICE_ID
,
2158 PCI_DEVICE_ID_VIRTIO_10_BASE
+ virtio_bus_get_vdev_id(bus
));
2159 pci_config_set_revision(config
, 1);
2161 config
[PCI_INTERRUPT_PIN
] = 1;
2165 struct virtio_pci_cap cap
= {
2166 .cap_len
= sizeof cap
,
2168 struct virtio_pci_notify_cap notify
= {
2169 .cap
.cap_len
= sizeof notify
,
2170 .notify_off_multiplier
=
2171 cpu_to_le32(virtio_pci_queue_mem_mult(proxy
)),
2173 struct virtio_pci_cfg_cap cfg
= {
2174 .cap
.cap_len
= sizeof cfg
,
2175 .cap
.cfg_type
= VIRTIO_PCI_CAP_PCI_CFG
,
2177 struct virtio_pci_notify_cap notify_pio
= {
2178 .cap
.cap_len
= sizeof notify
,
2179 .notify_off_multiplier
= cpu_to_le32(0x0),
2182 struct virtio_pci_cfg_cap
*cfg_mask
;
2184 virtio_pci_modern_regions_init(proxy
, vdev
->name
);
2186 virtio_pci_modern_mem_region_map(proxy
, &proxy
->common
, &cap
);
2187 virtio_pci_modern_mem_region_map(proxy
, &proxy
->isr
, &cap
);
2188 virtio_pci_modern_mem_region_map(proxy
, &proxy
->device
, &cap
);
2189 virtio_pci_modern_mem_region_map(proxy
, &proxy
->notify
, ¬ify
.cap
);
2190 if (proxy
->flags
& VIRTIO_PCI_FLAG_VDPA
) {
2191 memory_region_add_subregion(&proxy
->modern_bar
,
2192 proxy
->lm
.offset
, &proxy
->lm
.mr
);
2196 memory_region_init(&proxy
->io_bar
, OBJECT(proxy
),
2197 "virtio-pci-io", 0x4);
2199 pci_register_bar(&proxy
->pci_dev
, proxy
->modern_io_bar_idx
,
2200 PCI_BASE_ADDRESS_SPACE_IO
, &proxy
->io_bar
);
2202 virtio_pci_modern_io_region_map(proxy
, &proxy
->notify_pio
,
2206 pci_register_bar(&proxy
->pci_dev
, proxy
->modern_mem_bar_idx
,
2207 PCI_BASE_ADDRESS_SPACE_MEMORY
|
2208 PCI_BASE_ADDRESS_MEM_PREFETCH
|
2209 PCI_BASE_ADDRESS_MEM_TYPE_64
,
2210 &proxy
->modern_bar
);
2212 proxy
->config_cap
= virtio_pci_add_mem_cap(proxy
, &cfg
.cap
);
2213 cfg_mask
= (void *)(proxy
->pci_dev
.wmask
+ proxy
->config_cap
);
2214 pci_set_byte(&cfg_mask
->cap
.bar
, ~0x0);
2215 pci_set_long((uint8_t *)&cfg_mask
->cap
.offset
, ~0x0);
2216 pci_set_long((uint8_t *)&cfg_mask
->cap
.length
, ~0x0);
2217 pci_set_long(cfg_mask
->pci_cfg_data
, ~0x0);
2220 if (proxy
->nvectors
) {
2221 int err
= msix_init_exclusive_bar(&proxy
->pci_dev
, proxy
->nvectors
,
2222 proxy
->msix_bar_idx
, NULL
);
2224 /* Notice when a system that supports MSIx can't initialize it */
2225 if (err
!= -ENOTSUP
) {
2226 warn_report("unable to init msix vectors to %" PRIu32
,
2229 proxy
->nvectors
= 0;
2233 proxy
->pci_dev
.config_write
= virtio_write_config
;
2234 proxy
->pci_dev
.config_read
= virtio_read_config
;
2237 size
= VIRTIO_PCI_REGION_SIZE(&proxy
->pci_dev
)
2238 + virtio_bus_get_vdev_config_len(bus
);
2239 size
= pow2ceil(size
);
2241 memory_region_init_io(&proxy
->bar
, OBJECT(proxy
),
2242 &virtio_pci_config_ops
,
2243 proxy
, "virtio-pci", size
);
2245 pci_register_bar(&proxy
->pci_dev
, proxy
->legacy_io_bar_idx
,
2246 PCI_BASE_ADDRESS_SPACE_IO
, &proxy
->bar
);
2250 static void virtio_pci_device_unplugged(DeviceState
*d
)
2252 VirtIOPCIProxy
*proxy
= VIRTIO_PCI(d
);
2253 bool modern
= virtio_pci_modern(proxy
);
2254 bool modern_pio
= proxy
->flags
& VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY
;
2256 virtio_pci_stop_ioeventfd(proxy
);
2259 virtio_pci_modern_mem_region_unmap(proxy
, &proxy
->common
);
2260 virtio_pci_modern_mem_region_unmap(proxy
, &proxy
->isr
);
2261 virtio_pci_modern_mem_region_unmap(proxy
, &proxy
->device
);
2262 virtio_pci_modern_mem_region_unmap(proxy
, &proxy
->notify
);
2263 if (proxy
->flags
& VIRTIO_PCI_FLAG_VDPA
) {
2264 memory_region_del_subregion(&proxy
->modern_bar
, &proxy
->lm
.mr
);
2267 virtio_pci_modern_io_region_unmap(proxy
, &proxy
->notify_pio
);

static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 2   --  virtio modern io bar (off by default)
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    if (!(proxy->flags & VIRTIO_PCI_FLAG_VDPA)) {
        memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                           /* PCI BAR regions must be powers of 2 */
                           pow2ceil(proxy->notify.offset + proxy->notify.size));
    } else {
        proxy->lm.offset = proxy->notify.offset + proxy->notify.size;
        proxy->lm.size = 0x20 + VIRTIO_QUEUE_MAX * 4;
        memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                           /* PCI BAR regions must be powers of 2 */
                           pow2ceil(proxy->lm.offset + proxy->lm.size));
    }
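
    /*
     * Size sketch (illustrative, assuming the default non-page-per-vq
     * multiplier of 4 bytes per queue and VIRTIO_QUEUE_MAX of 1024):
     * notify.size = 4 * 1024 = 0x1000, so the non-vDPA modern BAR rounds
     * up to pow2ceil(0x3000 + 0x1000) = 0x4000.  With the vdpa flag set,
     * the lm region (0x20 + 1024 * 4 = 0x1020 bytes) follows the notify
     * region and the BAR rounds up to pow2ceil(0x4000 + 0x1020) = 0x8000.
     */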

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;
        uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_AER) {
            pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset,
                          PCI_ERR_SIZEOF, NULL);
            last_pcie_cap_offset += PCI_ERR_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, last_pcie_cap_offset,
                          proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED);
            last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}

static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    msix_uninit_exclusive_bar(pci_dev);
    if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port &&
        pci_is_express(pci_dev)) {
        pcie_aer_exit(pci_dev);
    }
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    int i;

    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].reset = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }
}

static void virtio_pci_bus_reset_hold(Object *obj)
{
    PCIDevice *dev = PCI_DEVICE(obj);
    DeviceState *qdev = DEVICE(obj);

    virtio_pci_reset(qdev);

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, true),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_AER_BIT, false),
    DEFINE_PROP_BIT("vdpa", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_VDPA_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};
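
/*
 * Illustrative usage (hypothetical command line; any virtio PCI device
 * accepts the same proxy-level properties):
 *
 *   -device virtio-net-pci,modern-pio-notify=on,ats=on
 *
 * Properties with an "x-" prefix are experimental/unstable knobs and are
 * not intended to be set by end users.
 */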

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    rc->phases.hold = virtio_pci_bus_reset_hold;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}

void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .instance_finalize = t->instance_finalize,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
}
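
/*
 * Usage sketch (hypothetical "virtio-foo" device, names invented for
 * illustration): a device front-end fills in a VirtioPCIDeviceTypeInfo and
 * lets the helper above register the generic, transitional and
 * non-transitional QOM types in one go:
 *
 *   static const VirtioPCIDeviceTypeInfo virtio_foo_pci_info = {
 *       .base_name              = "virtio-foo-pci-base",
 *       .generic_name           = "virtio-foo-pci",
 *       .transitional_name      = "virtio-foo-pci-transitional",
 *       .non_transitional_name  = "virtio-foo-pci-non-transitional",
 *       .instance_size          = sizeof(VirtIOFooPCI),
 *       .instance_init          = virtio_foo_pci_instance_init,
 *       .class_init             = virtio_foo_pci_class_init,
 *   };
 *
 *   static void virtio_foo_pci_register(void)
 *   {
 *       virtio_pci_types_register(&virtio_foo_pci_info);
 *   }
 *   type_init(virtio_foo_pci_register)
 */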

unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues)
{
    /*
     * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted
     * virtqueue buffers can handle their completion. When a different vCPU
     * handles completion it may need to IPI the vCPU that submitted the
     * request and this adds overhead.
     *
     * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in
     * guests with very many vCPUs and a device that is only used by a few
     * vCPUs. Unfortunately optimizing that case requires manual pinning inside
     * the guest, so those users might as well manually set the number of
     * queues. There is no upper limit that can be applied automatically and
     * doing so arbitrarily would result in a sudden performance drop once the
     * threshold number of vCPUs is exceeded.
     */
    unsigned num_queues = current_machine->smp.cpus;

    /*
     * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but the
     * config change interrupt and the fixed virtqueues must be taken into
     * account too.
     */
    num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues);

    /*
     * There is a limit to how many virtqueues a device can have.
     */
    return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues);
}
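
/*
 * Worked example (illustrative): a device with 2 fixed virtqueues on a
 * 64-vCPU machine gets MIN(64, PCI_MSIX_FLAGS_QSIZE - 2) = 64, then
 * MIN(64, VIRTIO_QUEUE_MAX - 2) = 64 optimal queues.  A 4096-vCPU machine
 * would instead be capped at VIRTIO_QUEUE_MAX (1024) - 2 = 1022, since the
 * MSI-X limit (PCI_MSIX_FLAGS_QSIZE, 0x7ff) is less restrictive there.
 */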

/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->iommu_enabled = virtio_pci_iommu_enabled;
    k->queue_enabled = virtio_pci_queue_enabled;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_size    = sizeof(VirtioPCIBusClass),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)