/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "exec/memop.h"
#include "standard-headers/linux/virtio_pci.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/visitor.h"

#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
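
/*
 * Layout sketch of the legacy I/O BAR, for illustration: the common virtio
 * header occupies the first VIRTIO_PCI_CONFIG_OFF() bytes (20 without MSI-X,
 * 24 with it), and the per-driver configuration space follows immediately.
 * So with MSI-X enabled, device config byte 2 lives at BAR offset 24 + 2 = 26.
 */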
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);

/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);

    if (msix_enabled(&proxy->pci_dev)) {
        msix_notify(&proxy->pci_dev, vector);
    } else {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1);
    }
}
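
/*
 * Rough interrupt model, for orientation: with MSI-X enabled the vector is
 * delivered through the MSI-X table; otherwise bit 0 of the ISR (the queue
 * interrupt bit) drives the level of the legacy INTx line directly.
 */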
static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, vdev->config_vector);
    }
}

static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}

static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};

static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}

static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}

static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (msix_present(&proxy->pci_dev)) {
        qemu_put_be16(f, virtio_queue_vector(vdev, n));
    }
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int ret;

    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint16_t vector;

    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}

static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}

#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000

static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy)
{
    return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ?
        QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4;
}
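
/*
 * Worked example: with page-per-vq enabled, queue 3 notifies at offset
 * 3 * 0x1000 = 0x3000 within the notify region; with the default
 * multiplier of 4, the same queue notifies at offset 3 * 4 = 12.
 */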
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}
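
/*
 * Note on the eventfd widths used above: size 0 with fast_mmio means
 * "match any access length" (KVM fast MMIO), while size 2 matches the
 * 16-bit queue-index write drivers perform.  The legacy and modern PIO
 * notify regions match on the data value (true) because all queues share
 * one notify address; the modern MMIO region does not (false) since the
 * queue index is already encoded in the address.
 */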
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}
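
/*
 * Legacy ring placement example: the driver writes a page frame number to
 * VIRTIO_PCI_QUEUE_PFN, so a write of 0x12345 places the selected queue's
 * rings at guest physical address 0x12345 << VIRTIO_PCI_QUEUE_ADDR_SHIFT
 * (0x12345000 with the usual 4K shift); writing 0 resets the device.
 */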
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = atomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}

/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static void
virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                           const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}
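
/*
 * Alignment example for the helpers above: a 2-byte access to guest offset
 * 0x1001 is clamped by "addr &= ~(len - 1)" to 0x1000 before the region
 * lookup, so a malicious unaligned address never reaches
 * memory_region_dispatch_write() unaligned.
 */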
static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}

static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}

static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap +
                       offsetof(struct virtio_pci_cfg_cap, pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}
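
/*
 * How the VIRTIO_PCI_CAP_PCI_CFG window above is driven, roughly: the guest
 * programs cap.bar/cap.offset/cap.length in the capability, then accesses
 * pci_cfg_data.  A config write to pci_cfg_data is forwarded into the
 * selected BAR region by virtio_address_space_write(); a config read first
 * refreshes pci_cfg_data via virtio_address_space_read() and then lets the
 * default config read return it.
 */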
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}

static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}
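
/*
 * Route sharing example: two queues assigned to the same MSI-X vector share
 * one KVM MSI route; irqfd->users counts them, so the virq is only released
 * by kvm_virtio_pci_vq_vector_release() when the last user goes away.
 */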
static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}

static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }

    return ret;
}

static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}

static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}

static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int index;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
        }
        vq = virtio_vector_next_queue(vq);
    }
}

static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}
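
/*
 * Ordering sketch of the above: on assign, guest notifiers (eventfds) are
 * created first and the MSI-X vector notifiers installed last; on deassign
 * the vector notifiers are torn down first, while the guest notifiers they
 * reference are still valid.  The error paths unwind in strictly reverse
 * order.
 */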
static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

static int virtio_pci_query_nvectors(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);

    return proxy->nvectors;
}

static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    PCIDevice *dev = &proxy->pci_dev;

    return pci_get_address_space(dev);
}

static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}
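
/*
 * Capability layout note: pci_add_capability() fills in the capability ID
 * and next pointer (the first PCI_CAP_FLAGS bytes); the memcpy above then
 * copies the virtio-specific remainder of the caller's template, starting
 * at cap_len, into config space right after them.
 */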
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}

static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
        proxy->vqs[vdev->queue_sel].enabled = 1;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}
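
/*
 * Worked example for VIRTIO_PCI_COMMON_Q_ENABLE above: the driver first
 * writes the ring addresses in 32-bit halves (Q_DESCLO/Q_DESCHI and
 * friends), and enabling the queue assembles each 64-bit address as
 * ((uint64_t)hi << 32) | lo, e.g. hi=0x1, lo=0x2000 -> 0x100002000.
 */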
static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    return 0;
}

static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent);
    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;
    unsigned queue = val;

    if (queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}

static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val = atomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);

    return val;
}
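
/*
 * ISR semantics above: the read atomically exchanges the ISR with 0 and
 * deasserts INTx, so a guest reading 0x1 learns a queue event was pending
 * and implicitly acknowledges it; an immediate second read returns 0.
 */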
static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}

static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIODevice *vdev = opaque;
    uint64_t val = 0;

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    }
    return val;
}

static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIODevice *vdev = opaque;

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}

static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };

    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          "virtio-pci-common",
                          proxy->common.size);

    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          "virtio-pci-isr",
                          proxy->isr.size);

    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-device",
                          proxy->device.size);

    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify",
                          proxy->notify.size);

    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          virtio_bus_get_device(&proxy->bus),
                          "virtio-pci-notify-pio",
                          proxy->notify_pio.size);
}

static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}
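
/*
 * Example of a mapped region, using the default layout set up in
 * virtio_pci_realize(): the common config region sits at offset 0x0,
 * size 0x1000, of the modern memory BAR, so its capability advertises
 * cfg_type=VIRTIO_PCI_CAP_COMMON_CFG, bar=4, offset=0, length=0x1000.
 */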
static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}

static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}

static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}

static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}

static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuses guests
     */
    if (!proxy->ignore_backend_features &&
        !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0 */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;

    if (modern) {
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}
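
/*
 * Device ID example for the modern (non-transitional) branch above: PCI
 * device IDs are 0x1040 + the virtio device ID, so virtio-net (device ID 1)
 * appears as 1af4:1041, while transitional devices keep the legacy
 * 1af4:1000-range IDs set by the subclass.
 */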
static void virtio_pci_device_unplugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    bool modern = virtio_pci_modern(proxy);
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;

    virtio_pci_stop_ioeventfd(proxy);

    if (modern) {
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
        virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
        if (modern_pio) {
            virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
        }
    }
}

static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     *   region 0   --  virtio legacy io bar
     *   region 1   --  msi-x bar
     *   region 4+5 --  virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, 256);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}
static void virtio_pci_exit(PCIDevice *pci_dev)
{
    msix_uninit_exclusive_bar(pci_dev);
}

static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    PCIDevice *dev = PCI_DEVICE(qdev);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}

static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
{
    VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    PCIDevice *pci_dev = &proxy->pci_dev;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
        virtio_pci_modern(proxy)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    vpciklass->parent_dc_realize(qdev, errp);
}

static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};

static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}

static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}

static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}

static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}
void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }

    g_free(base_name);
}
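
/*
 * Typical (hypothetical) use by a device "foo", for illustration only;
 * "foo" and the VirtIOFooPCI names are placeholders, not real types:
 *
 *     static const VirtioPCIDeviceTypeInfo foo_pci_info = {
 *         .base_name             = "virtio-foo-pci-base",
 *         .generic_name          = "virtio-foo-pci",
 *         .transitional_name     = "virtio-foo-pci-transitional",
 *         .non_transitional_name = "virtio-foo-pci-non-transitional",
 *         .instance_size         = sizeof(VirtIOFooPCI),
 *         .instance_init         = virtio_foo_pci_instance_init,
 *         .class_init            = virtio_foo_pci_class_init,
 *     };
 *     virtio_pci_types_register(&foo_pci_info);
 */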
/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}

static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};

static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)