/*
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "virtio-blk.h"
#include "virtio-net.h"
#include "virtio-serial.h"
#include "virtio-scsi.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "sysemu/blockdev.h"
#include "virtio-pci.h"
#include "qemu/range.h"

/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register. Reading the value will return the
 * current contents of the ISR and will also clear it. This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR        20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR         22

/* Config space size */
#define VIRTIO_PCI_CONFIG_NOMSI         20
#define VIRTIO_PCI_CONFIG_MSI           24

#define VIRTIO_PCI_REGION_SIZE(dev)     (msix_present(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG(dev)          (msix_enabled(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

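/* Concretely: without MSI-X the legacy registers end right after the ISR
 * byte, so the device-specific config area starts at offset 20; with MSI-X
 * enabled the two vector registers sit at offsets 20 and 22 and push the
 * device-specific area out to offset 24. */
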
/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT    12

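/* Example: a guest that placed a vring at guest-physical address 0x12340000
 * writes PFN 0x12340 to VIRTIO_PCI_QUEUE_PFN; the address is recovered below
 * by shifting the value left by VIRTIO_PCI_QUEUE_ADDR_SHIFT. */
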
/* Flags track per-device state like workarounds for quirks in older guests. */
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG  (1 << 0)

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step. We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

/* HACK for virtio to determine if it's running a big endian guest */
bool virtio_is_big_endian(void);

/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}

static void virtio_pci_notify(DeviceState *d, uint16_t vector)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else
        qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
}

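/* Migration hooks: the virtio core calls back into the transport so that
 * PCI-specific state (PCI config space, MSI-X state, and the MSI-X vector
 * bound to the config notifier and to each virtqueue) is saved and restored
 * alongside the generic virtio state. */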
static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, proxy->vdev->config_vector);
}

static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
}

static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int ret;

    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &proxy->vdev->config_vector);
    } else {
        proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
    }
    return 0;
}

static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    uint16_t vector;

    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(proxy->vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}

static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
                                                 int n, bool assign, bool set_handler)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r = 0;

    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            error_report("%s: unable to init event notifier: %d",
                         __func__, r);
            return r;
        }
        virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
        memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
                                  true, n, notifier);
        virtio_queue_set_host_notifier_fd_handler(vq, false, false);
        event_notifier_cleanup(notifier);
    }
    return r;
}

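/* ioeventfd: the eventfds registered above are bound to writes of the queue
 * index at VIRTIO_PCI_QUEUE_NOTIFY, so (with KVM) a guest kick signals the
 * host notifier directly in the kernel instead of trapping into
 * virtio_ioport_write() below. */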
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    int n, r;

    if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
        proxy->ioeventfd_disabled ||
        proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
        if (r < 0) {
            goto assign_error;
        }
    }
    proxy->ioeventfd_started = true;
    return;

assign_error:
    while (--n >= 0) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
    error_report("%s: failed. Fallback to a userspace (slower).", __func__);
}

static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    int n, r;

    if (!proxy->ioeventfd_started) {
        return;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(proxy->vdev, n)) {
            continue;
        }

        r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
        assert(r >= 0);
    }
    proxy->ioeventfd_started = false;
}

void virtio_pci_reset(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    virtio_pci_stop_ioeventfd(proxy);
    virtio_reset(proxy->vdev);
    msix_unuse_all_vectors(&proxy->pci_dev);
    proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
}

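/* Guest writes to the legacy register block defined at the top of this file.
 * A legacy driver roughly does: negotiate features via GUEST_FEATURES, select
 * each queue with QUEUE_SEL, read its size from QUEUE_NUM, publish the ring
 * with QUEUE_PFN, set DRIVER_OK in STATUS, and then kick queues through
 * QUEUE_NOTIFY. */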
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = vdev->bad_features ? vdev->bad_features(vdev) : 0;
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_pci_stop_ioeventfd(proxy);
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_PCI_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        /* Linux before 2.6.34 sets the device as OK without enabling
           the PCI device bus master bit. In this case we need to disable
           some safety checks. */
        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}

static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = proxy->vdev;
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = proxy->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        qemu_set_irq(proxy->pci_dev.irq[0], 0);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

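/* BAR 0 dispatch: offsets below VIRTIO_PCI_CONFIG() are routed to the legacy
 * registers handled above; everything past that is the device-specific config
 * area, which lives in guest-native byte order, hence the swaps for
 * big-endian guests. */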
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    uint64_t val = 0;

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(proxy->vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(proxy->vdev, addr);
        if (virtio_is_big_endian()) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(proxy->vdev, addr);
        if (virtio_is_big_endian()) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}

static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;

    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(proxy->vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian()) {
            val = bswap16(val);
        }
        virtio_config_writew(proxy->vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian()) {
            val = bswap32(val);
        }
        virtio_config_writel(proxy->vdev, addr, val);
        break;
    }
}

static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};

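/* Intercept guest writes to PCI config space: when the guest clears the bus
 * master bit the device must stop DMA, so DRIVER_OK is dropped here; the
 * BUS_MASTER_BUG flag suppresses this for old guests (see the note about
 * Linux before 2.6.34 in virtio_ioport_write()). */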
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
        !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
        virtio_pci_stop_ioeventfd(proxy);
        virtio_set_status(proxy->vdev,
                          proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
    }
}

static unsigned virtio_pci_get_features(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return proxy->host_features;
}

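/* MSI-X delivery via KVM irqfd: each vector has a VirtIOIRQFD entry. The
 * first queue to use a vector allocates a kernel MSI route and attaches an
 * irqfd to the queue's guest notifier; further users only bump the reference
 * count, and the route is released once the last user is gone. */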
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector,
                                        MSIMessage msg)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, msg);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;

    ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
    if (ret < 0) {
        if (--irqfd->users == 0) {
            kvm_irqchip_release_virq(kvm_state, irqfd->virq);
        }
        return ret;
    }
    return 0;
}

static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int queue_no,
                                             unsigned int vector)
{
    VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
    assert(ret == 0);

    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}

static int kvm_virtio_pci_vector_use(PCIDevice *dev, unsigned vector,
                                     MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int ret, queue_no;

    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
        if (ret < 0) {
            goto undo;
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        kvm_virtio_pci_vq_vector_release(proxy, queue_no, vector);
    }
    return ret;
}

static void kvm_virtio_pci_vector_release(PCIDevice *dev, unsigned vector)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int queue_no;

    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        if (virtio_queue_vector(vdev, queue_no) != vector) {
            continue;
        }
        kvm_virtio_pci_vq_vector_release(proxy, queue_no, vector);
    }
}

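/* Called when MSI-X mask state changes: for every queue whose vector is in
 * the polled range and currently masked, check the guest notifier and mark
 * the vector pending so the interrupt is not lost on unmask. */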
static void kvm_virtio_pci_vector_poll(PCIDevice *dev,
                                       unsigned int vector_start,
                                       unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < VIRTIO_PCI_QUEUE_MAX; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    return 0;
}

static bool virtio_pci_query_guest_notifiers(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    return msix_enabled(&proxy->pci_dev);
}

static int virtio_pci_set_guest_notifiers(DeviceState *d, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = proxy->vdev;
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    /* Must unset vector notifier while guest notifier is still assigned */
    if (proxy->vector_irqfd && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        g_free(proxy->vector_irqfd);
        proxy->vector_irqfd = NULL;
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign,
                                          kvm_msi_via_irqfd_enabled());
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if (with_irqfd && assign) {
        proxy->vector_irqfd =
            g_malloc0(sizeof(*proxy->vector_irqfd) *
                      msix_nr_vectors_allocated(&proxy->pci_dev));
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      kvm_virtio_pci_vector_use,
                                      kvm_virtio_pci_vector_release,
                                      kvm_virtio_pci_vector_poll);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}

static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    /* Stop using ioeventfd for virtqueue kick if the device starts using host
     * notifiers. This makes it easy to avoid stepping on each others' toes.
     */
    proxy->ioeventfd_disabled = assign;
    if (assign) {
        virtio_pci_stop_ioeventfd(proxy);
    }
    /* We don't need to start here: it's not needed because backend
     * currently only stops on status change away from ok,
     * reset, vmstop and such. If we do add code to start here,
     * need to check vmstate, device state etc. */
    return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
}

static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    if (running) {
        /* Try to find out if the guest has bus master disabled, but is
           in ready state. Then we have a buggy guest OS. */
        if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}

static const VirtIOBindings virtio_pci_bindings = {
    .notify = virtio_pci_notify,
    .save_config = virtio_pci_save_config,
    .load_config = virtio_pci_load_config,
    .save_queue = virtio_pci_save_queue,
    .load_queue = virtio_pci_load_queue,
    .get_features = virtio_pci_get_features,
    .query_guest_notifiers = virtio_pci_query_guest_notifiers,
    .set_host_notifier = virtio_pci_set_host_notifier,
    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
    .vmstate_change = virtio_pci_vmstate_change,
};

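/* Wire a VirtIODevice up to this PCI proxy: program the PCI IDs and class,
 * set up MSI-X, register the I/O BAR backed by virtio_pci_config_ops, and
 * hand the bindings above to the virtio core via virtio_bind_device(). */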
void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev)
{
    uint8_t *config;
    uint32_t size;

    proxy->vdev = vdev;

    config = proxy->pci_dev.config;

    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }
    pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_get_word(config + PCI_VENDOR_ID));
    pci_set_word(config + PCI_SUBSYSTEM_ID, vdev->device_id);
    config[PCI_INTERRUPT_PIN] = 1;

    if (vdev->nvectors &&
        msix_init_exclusive_bar(&proxy->pci_dev, vdev->nvectors, 1)) {
        vdev->nvectors = 0;
    }

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
    if (size & (size - 1)) {
        size = 1 << qemu_fls(size);
    }

    memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
                          "virtio-pci", size);
    pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
                     &proxy->bar);

    if (!kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    virtio_bind_device(vdev, &virtio_pci_bindings, DEVICE(proxy));
    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = vdev->get_features(vdev, proxy->host_features);
}

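/* Worked example of the BAR sizing above: with MSI-X present the common
 * registers take 24 bytes, so a device with a 20-byte config area asks for
 * 44 bytes, rounded up to the next power of two (64). */
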
static int virtio_blk_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_STORAGE_SCSI &&
        proxy->class_code != PCI_CLASS_STORAGE_OTHER)
        proxy->class_code = PCI_CLASS_STORAGE_SCSI;

    vdev = virtio_blk_init(&pci_dev->qdev, &proxy->blk);
    if (!vdev) {
        return -1;
    }
    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev);
    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static void virtio_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    memory_region_destroy(&proxy->bar);
    msix_uninit_exclusive_bar(pci_dev);
}

static void virtio_blk_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_blk_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static int virtio_serial_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        proxy->class_code != PCI_CLASS_DISPLAY_OTHER &&       /* qemu 0.10 */
        proxy->class_code != PCI_CLASS_OTHERS)                /* qemu-kvm */
        proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER;

    vdev = virtio_serial_init(&pci_dev->qdev, &proxy->serial);
    if (!vdev) {
        return -1;
    }
    vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
                                        ? proxy->serial.max_virtserial_ports + 1
                                        : proxy->nvectors;
    virtio_init_pci(proxy, vdev);
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static void virtio_serial_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_serial_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static int virtio_net_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net);

    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev);

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static void virtio_net_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_net_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static int virtio_balloon_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_OTHERS &&
        proxy->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        proxy->class_code = PCI_CLASS_OTHERS;
    }

    vdev = virtio_balloon_init(&pci_dev->qdev);
    if (!vdev) {
        return -1;
    }
    virtio_init_pci(proxy, vdev);
    return 0;
}

*pci_dev
)
889 VirtIOPCIProxy
*proxy
= DO_UPCAST(VirtIOPCIProxy
, pci_dev
, pci_dev
);
891 virtio_pci_stop_ioeventfd(proxy
);
892 virtio_balloon_exit(proxy
->vdev
);
893 virtio_exit_pci(pci_dev
);
static int virtio_rng_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->rng.rng == NULL) {
        proxy->rng.default_backend = RNG_RANDOM(object_new(TYPE_RNG_RANDOM));

        object_property_add_child(OBJECT(pci_dev),
                                  "default-backend",
                                  OBJECT(proxy->rng.default_backend),
                                  NULL);

        object_property_set_link(OBJECT(pci_dev),
                                 OBJECT(proxy->rng.default_backend),
                                 "rng", NULL);
    }

    vdev = virtio_rng_init(&pci_dev->qdev, &proxy->rng);
    if (!vdev) {
        return -1;
    }
    virtio_init_pci(proxy, vdev);
    return 0;
}

static void virtio_rng_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_pci_stop_ioeventfd(proxy);
    virtio_rng_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static Property virtio_blk_properties[] = {
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, blk.conf),
    DEFINE_BLOCK_CHS_PROPERTIES(VirtIOPCIProxy, blk.conf),
    DEFINE_PROP_STRING("serial", VirtIOPCIProxy, blk.serial),
    DEFINE_PROP_BIT("scsi", VirtIOPCIProxy, blk.scsi, 0, true),
    DEFINE_PROP_BIT("config-wce", VirtIOPCIProxy, blk.config_wce, 0, true),
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    DEFINE_PROP_BIT("x-data-plane", VirtIOPCIProxy, blk.data_plane, 0, false),
#endif
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_blk_init_pci;
    k->exit = virtio_blk_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_blk_properties;
}

static TypeInfo virtio_blk_info = {
    .name          = "virtio-blk-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_blk_class_init,
};

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
    DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy, net.txtimer, TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy, net.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_net_init_pci;
    k->exit = virtio_net_exit_pci;
    k->romfile = "pxe-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_net_properties;
}

static TypeInfo virtio_net_info = {
    .name          = "virtio-net-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_net_class_init,
};

static Property virtio_serial_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy, serial.max_virtserial_ports, 31),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_serial_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_serial_init_pci;
    k->exit = virtio_serial_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_serial_properties;
}

static TypeInfo virtio_serial_info = {
    .name          = "virtio-serial-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_serial_class_init,
};

static Property virtio_balloon_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_balloon_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_balloon_init_pci;
    k->exit = virtio_balloon_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_balloon_properties;
}

static TypeInfo virtio_balloon_info = {
    .name          = "virtio-balloon-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_balloon_class_init,
};

static void virtio_rng_initfn(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
                             (Object **)&proxy->rng.rng, NULL);
}

static Property virtio_rng_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
    /* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s. If
       you have an entropy source capable of generating more entropy than this
       and you can pass it through via virtio-rng, then hats off to you. Until
       then, this is unlimited for all practical purposes.
    */
    DEFINE_PROP_UINT64("max-bytes", VirtIOPCIProxy, rng.max_bytes, INT64_MAX),
    DEFINE_PROP_UINT32("period", VirtIOPCIProxy, rng.period_ms, 1 << 16),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_rng_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_rng_init_pci;
    k->exit = virtio_rng_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_rng_properties;
}

static TypeInfo virtio_rng_info = {
    .name          = "virtio-rng-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .instance_init = virtio_rng_initfn,
    .class_init    = virtio_rng_class_init,
};

static int virtio_scsi_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_scsi_init(&pci_dev->qdev, &proxy->scsi);
    if (!vdev) {
        return -EINVAL;
    }

    vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
                                        ? proxy->scsi.num_queues + 3
                                        : proxy->nvectors;
    virtio_init_pci(proxy, vdev);

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static void virtio_scsi_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_scsi_exit(proxy->vdev);
    virtio_exit_pci(pci_dev);
}

static Property virtio_scsi_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED),
    DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOPCIProxy, host_features, scsi),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_scsi_init_pci;
    k->exit = virtio_scsi_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    dc->reset = virtio_pci_reset;
    dc->props = virtio_scsi_properties;
}

static TypeInfo virtio_scsi_info = {
    .name          = "virtio-scsi-pci",
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_scsi_class_init,
};

static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_blk_info);
    type_register_static(&virtio_net_info);
    type_register_static(&virtio_serial_info);
    type_register_static(&virtio_balloon_info);
    type_register_static(&virtio_scsi_info);
    type_register_static(&virtio_rng_info);
}

type_init(virtio_pci_register_types)