/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "hw/virtio.h"
#include "hw/virtio-blk.h"
#include "hw/virtio-net.h"
#include "hw/virtio-serial.h"
#include "hw/virtio-scsi.h"
#include "hw/pci/pci.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"
#include "sysemu/kvm.h"
#include "sysemu/blockdev.h"
#include "hw/virtio-pci.h"
#include "qemu/range.h"
#include "hw/virtio-bus.h"
/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR        20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR         22

/* Config space size */
#define VIRTIO_PCI_CONFIG_NOMSI         20
#define VIRTIO_PCI_CONFIG_MSI           24
#define VIRTIO_PCI_REGION_SIZE(dev)     (msix_present(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG(dev)          (msix_enabled(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT    12
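/* Illustrative note (not in the original source): with the legacy interface
 * the guest writes a page frame number, not a byte address, to QUEUE_PFN.
 * The device recovers the ring address by shifting; e.g. a guest write of
 * 0x12345 selects the ring at physical address 0x12345 << 12 == 0x12345000,
 * which is exactly what virtio_ioport_write() does below with
 * (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
 */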
/* Flags track per-device state like workarounds for quirks in older guests. */
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG  (1 << 0)

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

/* HACK for virtio to determine if it's running a big endian guest */
bool virtio_is_big_endian(void);
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}
static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else
        qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
}
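/* Illustrative note (not in the original source): with MSI-X enabled the
 * vector is delivered directly via msix_notify(); otherwise the shared INTx
 * line simply mirrors bit 0 of the device ISR, which the guest later clears
 * by reading VIRTIO_PCI_ISR.
 */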
static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, proxy->vdev->config_vector);
}
static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
}
static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &proxy->vdev->config_vector);
    } else {
        proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev,
                               proxy->vdev->config_vector);
    }
    return 0;
}
static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(proxy->vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}
static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                                 int n, bool assign,
                                                 bool set_handler)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, notifier);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}
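/* Illustrative note (summary, not original text): "assign" binds the queue's
 * host notifier (an eventfd) to the VIRTIO_PCI_QUEUE_NOTIFY offset of the BAR
 * via memory_region_add_eventfd(), so a guest kick is turned into an eventfd
 * signal (an ioeventfd exit under KVM) instead of going through the ioport
 * emulation path; deassign tears the binding down in the opposite order.
 */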
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    int n, r;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    while (--n >= 0) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed. Fallback to a userspace (slower).", __func__);
}
static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    int r;
    int n;

    if (!proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
}
static void virtio_pci_reset(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_reset(proxy->vdev);
    msix_unuse_all_vectors(&proxy->pci_dev);
    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
}
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = vdev->bad_features ? vdev->bad_features(vdev) : 0;
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_stop_ioeventfd(proxy);
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_PCI_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        /* Linux before 2.6.34 sets the device as OK without enabling
           the PCI device bus master bit. In this case we need to disable
           some safety checks. */
        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = proxy->vdev;
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = proxy->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        qemu_set_irq(proxy->pci_dev.irq[0], 0);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    uint64_t val = 0;
    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(proxy->vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(proxy->vdev, addr);
        if (virtio_is_big_endian()) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(proxy->vdev, addr);
        if (virtio_is_big_endian()) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}
static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(proxy->vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian()) {
            val = bswap16(val);
        }
        virtio_config_writew(proxy->vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian()) {
            val = bswap32(val);
        }
        virtio_config_writel(proxy->vdev, addr, val);
        break;
    }
}
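/* Illustrative note (not in the original source): the swaps above exist
 * because the legacy ioport registers are little-endian while the
 * device-specific config space is guest-native.  Loosely, a 16-bit value
 * 0x1234 written by a big-endian guest is seen as 0x3412 after the
 * little-endian region conversion and is swapped back before being stored
 * in guest-native layout.
 */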
static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
        !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(proxy->vdev,
                          proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }
}
static unsigned virtio_pci_get_features(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return proxy->host_features;
}
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}
static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    int ret;
    ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
    return ret;
}

static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}
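/* Illustrative note (summary of the KVM fast path, not original text): the
 * vq_vector_use/release pair manages a per-vector MSI route in the kernel
 * irqchip, while irqfd_use/release attaches or detaches the queue's guest
 * notifier eventfd to that route, so a vhost or ioeventfd backend can raise
 * the MSI-X interrupt without bouncing through QEMU userspace.
 */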
static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = proxy->vdev;
    unsigned int vector;
    int ret, queue_no;
    MSIMessage msg;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        msg = msix_get_message(dev, vector);
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (proxy->vdev->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (proxy->vdev->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}
static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = proxy->vdev;
    unsigned int vector;
    int queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (proxy->vdev->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}
static int kvm_virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                           unsigned int queue_no,
                                           unsigned int vector,
                                           MSIMessage msg)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret = 0;

    if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
        ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg);
        if (ret < 0) {
            return ret;
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (proxy->vdev->guest_notifier_mask) {
        proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (proxy->vdev->guest_notifier_pending &&
            proxy->vdev->guest_notifier_pending(proxy->vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}
static void kvm_virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                          unsigned int queue_no,
                                          unsigned int vector)
{
    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (proxy->vdev->guest_notifier_mask) {
        proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}
static int kvm_virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                        MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int ret, queue_no;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_unmask(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        kvm_virtio_pci_vq_vector_mask(proxy, queue_no, vector);
    }
    return ret;
}
static void kvm_virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int queue_no;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        kvm_virtio_pci_vq_vector_mask(proxy, queue_no, vector);
    }
}
static void kvm_virtio_pci_vector_poll(PCIDevice *dev,
                                       unsigned int vector_start,
                                       unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (vdev->guest_notifier_pending) {
            if (vdev->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    return 0;
}
static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = proxy->vdev;
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_PCI_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if (proxy->vector_irqfd && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        kvm_virtio_pci_vector_release(proxy, nvqs);
        g_free(proxy->vector_irqfd);
        proxy->vector_irqfd = NULL;
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign,
                                          kvm_msi_via_irqfd_enabled());
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if (with_irqfd && assign) {
        proxy->vector_irqfd =
            g_malloc0(sizeof(*proxy->vector_irqfd) *
                      msix_nr_vectors_allocated(&proxy->pci_dev));
        r = kvm_virtio_pci_vector_use(proxy, nvqs);
        if (r < 0) {
            goto assign_error;
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      kvm_virtio_pci_vector_unmask,
                                      kvm_virtio_pci_vector_mask,
                                      kvm_virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    assert(assign);
    kvm_virtio_pci_vector_release(proxy, nvqs);

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}
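/* Illustrative note (ordering summary, not original text): assignment
 * installs the per-queue guest notifiers first and only then the MSI-X
 * vector notifiers (unmask/mask/poll callbacks); deassignment runs in the
 * opposite order, as the comments above require, so the vector callbacks
 * never see a queue whose notifier has already been torn down.
 */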
static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers.  This makes it easy to avoid stepping on each others' toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_pci_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc. */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
}
static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    if (running) {
        /* Try to find out if the guest has bus master disabled, but is
           in ready state. Then we have a buggy guest OS. */
        if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}
static const VirtIOBindings virtio_pci_bindings = {
    .notify = virtio_pci_notify,
    .save_config = virtio_pci_save_config,
    .load_config = virtio_pci_load_config,
    .save_queue = virtio_pci_save_queue,
    .load_queue = virtio_pci_load_queue,
    .get_features = virtio_pci_get_features,
    .query_guest_notifiers = virtio_pci_query_guest_notifiers,
    .set_host_notifier = virtio_pci_set_host_notifier,
    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
    .vmstate_change = virtio_pci_vmstate_change,
};
void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev)
{
    uint8_t *config;
    uint32_t size;

    proxy->vdev = vdev;

    config = proxy->pci_dev.config;

    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }
    pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_get_word(config + PCI_VENDOR_ID));
    pci_set_word(config + PCI_SUBSYSTEM_ID, vdev->device_id);
    config[PCI_INTERRUPT_PIN] = 1;

    if (vdev->nvectors &&
        msix_init_exclusive_bar(&proxy->pci_dev, vdev->nvectors, 1)) {
        vdev->nvectors = 0;
    }

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
    if (size & (size - 1)) {
        size = 1 << qemu_fls(size);
    }

    memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
                          "virtio-pci", size);
    pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
                     &proxy->bar);

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    virtio_bind_device(vdev, &virtio_pci_bindings, DEVICE(proxy));
    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = vdev->get_features(vdev, proxy->host_features);
}
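/* Illustrative sizing example (not in the original source): BAR 0 covers the
 * common legacy registers (20 bytes, or 24 with MSI-X present) followed by
 * the device-specific config, rounded up to a power of two.  For instance,
 * 24 + 12 config bytes (36) rounds up to a 64-byte I/O BAR, while 24 + 8
 * (32) is already a power of two and is left unchanged.
 */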
static int virtio_blk_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_STORAGE_SCSI &&
        proxy->class_code != PCI_CLASS_STORAGE_OTHER)
        proxy->class_code = PCI_CLASS_STORAGE_SCSI;

    vdev = virtio_blk_init(&pci_dev->qdev, &proxy->blk);

    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev);
    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}
static void virtio_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    memory_region_destroy(&proxy->bar);
    msix_uninit_exclusive_bar(pci_dev);
}
static void virtio_blk_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_blk_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}
static int virtio_serial_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        proxy->class_code != PCI_CLASS_DISPLAY_OTHER &&   /* qemu 0.10 */
        proxy->class_code != PCI_CLASS_OTHERS)            /* qemu-kvm  */
        proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER;

    vdev = virtio_serial_init(&pci_dev->qdev, &proxy->serial);

    /* backwards-compatibility with machines that were created with
       DEV_NVECTORS_UNSPECIFIED */
    vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
                     ? proxy->serial.max_virtserial_ports + 1
                     : proxy->nvectors;
    virtio_init_pci(proxy, vdev);
    proxy->nvectors = vdev->nvectors;
    return 0;
}
static void virtio_serial_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_serial_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}
static int virtio_net_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net,
                           proxy->host_features);

    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev);

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}
static void virtio_net_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_net_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}
static int virtio_balloon_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_OTHERS &&
        proxy->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        proxy->class_code = PCI_CLASS_OTHERS;
    }

    vdev = virtio_balloon_init(&pci_dev->qdev);

    virtio_init_pci(proxy, vdev);
    return 0;
}
static void virtio_balloon_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_balloon_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}
static int virtio_rng_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->rng.rng == NULL) {
        proxy->rng.default_backend = RNG_RANDOM(object_new(TYPE_RNG_RANDOM));

        object_property_add_child(OBJECT(pci_dev),
                                  "default-backend",
                                  OBJECT(proxy->rng.default_backend),
                                  NULL);

        object_property_set_link(OBJECT(pci_dev),
                                 OBJECT(proxy->rng.default_backend),
                                 "rng", NULL);
    }

    vdev = virtio_rng_init(&pci_dev->qdev, &proxy->rng);

    virtio_init_pci(proxy, vdev);
    return 0;
}
static void virtio_rng_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_rng_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}
static Property virtio_blk_properties[] = {
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, blk.conf),
    DEFINE_BLOCK_CHS_PROPERTIES(VirtIOPCIProxy, blk.conf),
    DEFINE_PROP_STRING("serial", VirtIOPCIProxy, blk.serial),
    DEFINE_PROP_BIT("scsi", VirtIOPCIProxy, blk.scsi, 0, true),
    DEFINE_PROP_BIT("config-wce", VirtIOPCIProxy, blk.config_wce, 0, true),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    DEFINE_PROP_BIT("x-data-plane", VirtIOPCIProxy, blk.data_plane, 0, false),
#endif
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_blk_init_pci;
    k->exit = virtio_blk_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_blk_properties;
}

static const TypeInfo virtio_blk_info = {
    .name          = "virtio-blk-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_blk_class_init,
};
static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
    DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy, net.txtimer, TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy, net.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_net_init_pci;
    k->exit = virtio_net_exit_pci;
    k->romfile = "pxe-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_net_properties;
}

static const TypeInfo virtio_net_info = {
    .name          = "virtio-net-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_net_class_init,
};
static Property virtio_serial_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy, serial.max_virtserial_ports, 31),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_serial_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_serial_init_pci;
    k->exit = virtio_serial_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_serial_properties;
}

static const TypeInfo virtio_serial_info = {
    .name          = "virtio-serial-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_serial_class_init,
};
static Property virtio_balloon_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_balloon_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_balloon_init_pci;
    k->exit = virtio_balloon_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_balloon_properties;
}

static const TypeInfo virtio_balloon_info = {
    .name          = "virtio-balloon-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_balloon_class_init,
};
static void virtio_rng_initfn(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
                             (Object **)&proxy->rng.rng, NULL);
}
static Property virtio_rng_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    /* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s.  If
       you have an entropy source capable of generating more entropy than this
       and you can pass it through via virtio-rng, then hats off to you.  Until
       then, this is unlimited for all practical purposes.
    */
    DEFINE_PROP_UINT64("max-bytes", VirtIOPCIProxy, rng.max_bytes, INT64_MAX),
    DEFINE_PROP_UINT32("period", VirtIOPCIProxy, rng.period_ms, 1 << 16),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_rng_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_rng_init_pci;
    k->exit = virtio_rng_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_rng_properties;
}

static const TypeInfo virtio_rng_info = {
    .name          = "virtio-rng-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .instance_init = virtio_rng_initfn,
    .class_init    = virtio_rng_class_init,
};
static int virtio_scsi_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_scsi_init(&pci_dev->qdev, &proxy->scsi);

    vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
                     ? proxy->scsi.num_queues + 3
                     : proxy->nvectors;
    virtio_init_pci(proxy, vdev);

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}
static void virtio_scsi_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_scsi_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}
static Property virtio_scsi_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED),
    DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOPCIProxy, host_features, scsi),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_scsi_init_pci;
    k->exit = virtio_scsi_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_scsi_properties;
}

static const TypeInfo virtio_scsi_info = {
    .name          = "virtio-scsi-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_scsi_class_init,
};
#ifdef CONFIG_VIRTFS
static int virtio_9p_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf);
    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev);
    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}
static Property virtio_9p_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy, fsconf.tag),
    DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy, fsconf.fsdev_id),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_9p_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_9p_init_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    dc->props = virtio_9p_properties;
    dc->reset = virtio_pci_reset;
}

static const TypeInfo virtio_9p_info = {
    .name          = "virtio-9p-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_9p_class_init,
};
#endif
/*
 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
 */

/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    uint8_t *config;
    uint32_t size;

    proxy->vdev = bus->vdev;

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }
    pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_get_word(config + PCI_VENDOR_ID));
    pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    config[PCI_INTERRUPT_PIN] = 1;

    if (proxy->nvectors &&
        msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
        proxy->nvectors = 0;
    }

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
         + virtio_bus_get_vdev_config_len(bus);
    if (size & (size - 1)) {
        size = 1 << qemu_fls(size);
    }

    memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
                          "virtio-pci", size);
    pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
                     &proxy->bar);

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = virtio_bus_get_vdev_features(bus,
                                                        proxy->host_features);
}
/* This is called by virtio-bus just before the device is unplugged. */
static void virtio_pci_device_unplug(DeviceState *d)
{
    VirtIOPCIProxy *dev = VIRTIO_PCI(d);
    virtio_pci_stop_ioeventfd(dev);
}
static int virtio_pci_init(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *dev = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    virtio_pci_bus_new(&dev->bus, dev);
    if (k->init != NULL) {
        return k->init(dev);
    }
    return 0;
}
static void virtio_pci_exit(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    BusState *qbus = BUS(&proxy->bus);
    virtio_bus_destroy_device(bus);
    qbus_free(qbus);
    virtio_exit_pci(pci_dev);
}
/*
 * This will be renamed virtio_pci_reset at the end of the series.
 * virtio_pci_reset is still in use at this moment.
 */
static void virtio_pci_rst(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);
    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
}
static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_pci_init;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_rst;
}
static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
};
/* virtio-pci-bus */

void virtio_pci_bus_new(VirtioBusState *bus, VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    BusState *qbus;
    qbus_create_inplace((BusState *)bus, TYPE_VIRTIO_PCI_BUS, qdev, NULL);
    qbus = BUS(bus);
    qbus->allow_hotplug = 0;
}
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->get_features = virtio_pci_get_features;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_host_notifier = virtio_pci_set_host_notifier;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplug = virtio_pci_device_unplug;
}

static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};
static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_blk_info);
    type_register_static(&virtio_net_info);
    type_register_static(&virtio_serial_info);
    type_register_static(&virtio_balloon_info);
    type_register_static(&virtio_scsi_info);
    type_register_static(&virtio_rng_info);
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
#ifdef CONFIG_VIRTFS
    type_register_static(&virtio_9p_info);
#endif
}

type_init(virtio_pci_register_types)