/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
19 #include "virtio-blk.h"
20 #include "virtio-net.h"
21 #include "virtio-serial.h"
23 #include "qemu-error.h"
29 #include "virtio-pci.h"
/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR        20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR         22

/* Config space size */
#define VIRTIO_PCI_CONFIG_NOMSI         20
#define VIRTIO_PCI_CONFIG_MSI           24
#define VIRTIO_PCI_REGION_SIZE(dev)     (msix_present(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG(dev)          (msix_enabled(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION          0

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT     12

/* Flags track per-device state like workarounds for quirks in older guests. */
#define VIRTIO_PCI_FLAG_BUS_MASTER_BUG  (1 << 0)

/* Performance improves when virtqueue kick processing is decoupled from the
 * vcpu thread using ioeventfd for some devices. */
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD   (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)
101 static void virtio_pci_notify(void *opaque
, uint16_t vector
)
103 VirtIOPCIProxy
*proxy
= opaque
;
104 if (msix_enabled(&proxy
->pci_dev
))
105 msix_notify(&proxy
->pci_dev
, vector
);
107 qemu_set_irq(proxy
->pci_dev
.irq
[0], proxy
->vdev
->isr
& 1);
110 static void virtio_pci_save_config(void * opaque
, QEMUFile
*f
)
112 VirtIOPCIProxy
*proxy
= opaque
;
113 pci_device_save(&proxy
->pci_dev
, f
);
114 msix_save(&proxy
->pci_dev
, f
);
115 if (msix_present(&proxy
->pci_dev
))
116 qemu_put_be16(f
, proxy
->vdev
->config_vector
);
119 static void virtio_pci_save_queue(void * opaque
, int n
, QEMUFile
*f
)
121 VirtIOPCIProxy
*proxy
= opaque
;
122 if (msix_present(&proxy
->pci_dev
))
123 qemu_put_be16(f
, virtio_queue_vector(proxy
->vdev
, n
));
126 static int virtio_pci_load_config(void * opaque
, QEMUFile
*f
)
128 VirtIOPCIProxy
*proxy
= opaque
;
130 ret
= pci_device_load(&proxy
->pci_dev
, f
);
134 msix_load(&proxy
->pci_dev
, f
);
135 if (msix_present(&proxy
->pci_dev
)) {
136 qemu_get_be16s(f
, &proxy
->vdev
->config_vector
);
138 proxy
->vdev
->config_vector
= VIRTIO_NO_VECTOR
;
140 if (proxy
->vdev
->config_vector
!= VIRTIO_NO_VECTOR
) {
141 return msix_vector_use(&proxy
->pci_dev
, proxy
->vdev
->config_vector
);
146 static int virtio_pci_load_queue(void * opaque
, int n
, QEMUFile
*f
)
148 VirtIOPCIProxy
*proxy
= opaque
;
150 if (msix_present(&proxy
->pci_dev
)) {
151 qemu_get_be16s(f
, &vector
);
153 vector
= VIRTIO_NO_VECTOR
;
155 virtio_queue_set_vector(proxy
->vdev
, n
, vector
);
156 if (vector
!= VIRTIO_NO_VECTOR
) {
157 return msix_vector_use(&proxy
->pci_dev
, vector
);
162 static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy
*proxy
,
165 VirtQueue
*vq
= virtio_get_queue(proxy
->vdev
, n
);
166 EventNotifier
*notifier
= virtio_queue_get_host_notifier(vq
);
169 r
= event_notifier_init(notifier
, 1);
171 error_report("%s: unable to init event notifier: %d",
175 r
= kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier
),
176 proxy
->addr
+ VIRTIO_PCI_QUEUE_NOTIFY
,
179 error_report("%s: unable to map ioeventfd: %d",
181 event_notifier_cleanup(notifier
);
184 r
= kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier
),
185 proxy
->addr
+ VIRTIO_PCI_QUEUE_NOTIFY
,
188 error_report("%s: unable to unmap ioeventfd: %d",
193 /* Handle the race condition where the guest kicked and we deassigned
194 * before we got around to handling the kick.
196 if (event_notifier_test_and_clear(notifier
)) {
197 virtio_queue_notify_vq(vq
);
200 event_notifier_cleanup(notifier
);
205 static void virtio_pci_host_notifier_read(void *opaque
)
207 VirtQueue
*vq
= opaque
;
208 EventNotifier
*n
= virtio_queue_get_host_notifier(vq
);
209 if (event_notifier_test_and_clear(n
)) {
210 virtio_queue_notify_vq(vq
);
214 static void virtio_pci_set_host_notifier_fd_handler(VirtIOPCIProxy
*proxy
,
217 VirtQueue
*vq
= virtio_get_queue(proxy
->vdev
, n
);
218 EventNotifier
*notifier
= virtio_queue_get_host_notifier(vq
);
220 qemu_set_fd_handler(event_notifier_get_fd(notifier
),
221 virtio_pci_host_notifier_read
, NULL
, vq
);
223 qemu_set_fd_handler(event_notifier_get_fd(notifier
),
228 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy
*proxy
)
232 if (!(proxy
->flags
& VIRTIO_PCI_FLAG_USE_IOEVENTFD
) ||
233 proxy
->ioeventfd_disabled
||
234 proxy
->ioeventfd_started
) {
238 for (n
= 0; n
< VIRTIO_PCI_QUEUE_MAX
; n
++) {
239 if (!virtio_queue_get_num(proxy
->vdev
, n
)) {
243 r
= virtio_pci_set_host_notifier_internal(proxy
, n
, true);
248 virtio_pci_set_host_notifier_fd_handler(proxy
, n
, true);
250 proxy
->ioeventfd_started
= true;
255 if (!virtio_queue_get_num(proxy
->vdev
, n
)) {
259 virtio_pci_set_host_notifier_fd_handler(proxy
, n
, false);
260 r
= virtio_pci_set_host_notifier_internal(proxy
, n
, false);
263 proxy
->ioeventfd_started
= false;
264 error_report("%s: failed. Fallback to a userspace (slower).", __func__
);
267 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy
*proxy
)
272 if (!proxy
->ioeventfd_started
) {
276 for (n
= 0; n
< VIRTIO_PCI_QUEUE_MAX
; n
++) {
277 if (!virtio_queue_get_num(proxy
->vdev
, n
)) {
281 virtio_pci_set_host_notifier_fd_handler(proxy
, n
, false);
282 r
= virtio_pci_set_host_notifier_internal(proxy
, n
, false);
285 proxy
->ioeventfd_started
= false;
288 static void virtio_pci_reset(DeviceState
*d
)
290 VirtIOPCIProxy
*proxy
= container_of(d
, VirtIOPCIProxy
, pci_dev
.qdev
);
291 virtio_pci_stop_ioeventfd(proxy
);
292 virtio_reset(proxy
->vdev
);
293 msix_reset(&proxy
->pci_dev
);
294 proxy
->flags
&= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG
;
297 static void virtio_ioport_write(void *opaque
, uint32_t addr
, uint32_t val
)
299 VirtIOPCIProxy
*proxy
= opaque
;
300 VirtIODevice
*vdev
= proxy
->vdev
;
301 target_phys_addr_t pa
;
304 case VIRTIO_PCI_GUEST_FEATURES
:
305 /* Guest does not negotiate properly? We have to assume nothing. */
306 if (val
& (1 << VIRTIO_F_BAD_FEATURE
)) {
307 if (vdev
->bad_features
)
308 val
= proxy
->host_features
& vdev
->bad_features(vdev
);
312 if (vdev
->set_features
)
313 vdev
->set_features(vdev
, val
);
314 vdev
->guest_features
= val
;
316 case VIRTIO_PCI_QUEUE_PFN
:
317 pa
= (target_phys_addr_t
)val
<< VIRTIO_PCI_QUEUE_ADDR_SHIFT
;
319 virtio_pci_stop_ioeventfd(proxy
);
320 virtio_reset(proxy
->vdev
);
321 msix_unuse_all_vectors(&proxy
->pci_dev
);
324 virtio_queue_set_addr(vdev
, vdev
->queue_sel
, pa
);
326 case VIRTIO_PCI_QUEUE_SEL
:
327 if (val
< VIRTIO_PCI_QUEUE_MAX
)
328 vdev
->queue_sel
= val
;
330 case VIRTIO_PCI_QUEUE_NOTIFY
:
331 if (val
< VIRTIO_PCI_QUEUE_MAX
) {
332 virtio_queue_notify(vdev
, val
);
335 case VIRTIO_PCI_STATUS
:
336 if (!(val
& VIRTIO_CONFIG_S_DRIVER_OK
)) {
337 virtio_pci_stop_ioeventfd(proxy
);
340 virtio_set_status(vdev
, val
& 0xFF);
342 if (val
& VIRTIO_CONFIG_S_DRIVER_OK
) {
343 virtio_pci_start_ioeventfd(proxy
);
346 if (vdev
->status
== 0) {
347 virtio_reset(proxy
->vdev
);
348 msix_unuse_all_vectors(&proxy
->pci_dev
);
351 /* Linux before 2.6.34 sets the device as OK without enabling
352 the PCI device bus master bit. In this case we need to disable
353 some safety checks. */
354 if ((val
& VIRTIO_CONFIG_S_DRIVER_OK
) &&
355 !(proxy
->pci_dev
.config
[PCI_COMMAND
] & PCI_COMMAND_MASTER
)) {
356 proxy
->flags
|= VIRTIO_PCI_FLAG_BUS_MASTER_BUG
;
359 case VIRTIO_MSI_CONFIG_VECTOR
:
360 msix_vector_unuse(&proxy
->pci_dev
, vdev
->config_vector
);
361 /* Make it possible for guest to discover an error took place. */
362 if (msix_vector_use(&proxy
->pci_dev
, val
) < 0)
363 val
= VIRTIO_NO_VECTOR
;
364 vdev
->config_vector
= val
;
366 case VIRTIO_MSI_QUEUE_VECTOR
:
367 msix_vector_unuse(&proxy
->pci_dev
,
368 virtio_queue_vector(vdev
, vdev
->queue_sel
));
369 /* Make it possible for guest to discover an error took place. */
370 if (msix_vector_use(&proxy
->pci_dev
, val
) < 0)
371 val
= VIRTIO_NO_VECTOR
;
372 virtio_queue_set_vector(vdev
, vdev
->queue_sel
, val
);
375 error_report("%s: unexpected address 0x%x value 0x%x",
376 __func__
, addr
, val
);
381 static uint32_t virtio_ioport_read(VirtIOPCIProxy
*proxy
, uint32_t addr
)
383 VirtIODevice
*vdev
= proxy
->vdev
;
384 uint32_t ret
= 0xFFFFFFFF;
387 case VIRTIO_PCI_HOST_FEATURES
:
388 ret
= proxy
->host_features
;
390 case VIRTIO_PCI_GUEST_FEATURES
:
391 ret
= vdev
->guest_features
;
393 case VIRTIO_PCI_QUEUE_PFN
:
394 ret
= virtio_queue_get_addr(vdev
, vdev
->queue_sel
)
395 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT
;
397 case VIRTIO_PCI_QUEUE_NUM
:
398 ret
= virtio_queue_get_num(vdev
, vdev
->queue_sel
);
400 case VIRTIO_PCI_QUEUE_SEL
:
401 ret
= vdev
->queue_sel
;
403 case VIRTIO_PCI_STATUS
:
407 /* reading from the ISR also clears it. */
410 qemu_set_irq(proxy
->pci_dev
.irq
[0], 0);
412 case VIRTIO_MSI_CONFIG_VECTOR
:
413 ret
= vdev
->config_vector
;
415 case VIRTIO_MSI_QUEUE_VECTOR
:
416 ret
= virtio_queue_vector(vdev
, vdev
->queue_sel
);
425 static uint32_t virtio_pci_config_readb(void *opaque
, uint32_t addr
)
427 VirtIOPCIProxy
*proxy
= opaque
;
428 uint32_t config
= VIRTIO_PCI_CONFIG(&proxy
->pci_dev
);
431 return virtio_ioport_read(proxy
, addr
);
433 return virtio_config_readb(proxy
->vdev
, addr
);
436 static uint32_t virtio_pci_config_readw(void *opaque
, uint32_t addr
)
438 VirtIOPCIProxy
*proxy
= opaque
;
439 uint32_t config
= VIRTIO_PCI_CONFIG(&proxy
->pci_dev
);
442 return virtio_ioport_read(proxy
, addr
);
444 return virtio_config_readw(proxy
->vdev
, addr
);
447 static uint32_t virtio_pci_config_readl(void *opaque
, uint32_t addr
)
449 VirtIOPCIProxy
*proxy
= opaque
;
450 uint32_t config
= VIRTIO_PCI_CONFIG(&proxy
->pci_dev
);
453 return virtio_ioport_read(proxy
, addr
);
455 return virtio_config_readl(proxy
->vdev
, addr
);
458 static void virtio_pci_config_writeb(void *opaque
, uint32_t addr
, uint32_t val
)
460 VirtIOPCIProxy
*proxy
= opaque
;
461 uint32_t config
= VIRTIO_PCI_CONFIG(&proxy
->pci_dev
);
464 virtio_ioport_write(proxy
, addr
, val
);
468 virtio_config_writeb(proxy
->vdev
, addr
, val
);
471 static void virtio_pci_config_writew(void *opaque
, uint32_t addr
, uint32_t val
)
473 VirtIOPCIProxy
*proxy
= opaque
;
474 uint32_t config
= VIRTIO_PCI_CONFIG(&proxy
->pci_dev
);
477 virtio_ioport_write(proxy
, addr
, val
);
481 virtio_config_writew(proxy
->vdev
, addr
, val
);
484 static void virtio_pci_config_writel(void *opaque
, uint32_t addr
, uint32_t val
)
486 VirtIOPCIProxy
*proxy
= opaque
;
487 uint32_t config
= VIRTIO_PCI_CONFIG(&proxy
->pci_dev
);
490 virtio_ioport_write(proxy
, addr
, val
);
494 virtio_config_writel(proxy
->vdev
, addr
, val
);
497 static void virtio_map(PCIDevice
*pci_dev
, int region_num
,
498 pcibus_t addr
, pcibus_t size
, int type
)
500 VirtIOPCIProxy
*proxy
= container_of(pci_dev
, VirtIOPCIProxy
, pci_dev
);
501 VirtIODevice
*vdev
= proxy
->vdev
;
502 unsigned config_len
= VIRTIO_PCI_REGION_SIZE(pci_dev
) + vdev
->config_len
;
506 register_ioport_write(addr
, config_len
, 1, virtio_pci_config_writeb
, proxy
);
507 register_ioport_write(addr
, config_len
, 2, virtio_pci_config_writew
, proxy
);
508 register_ioport_write(addr
, config_len
, 4, virtio_pci_config_writel
, proxy
);
509 register_ioport_read(addr
, config_len
, 1, virtio_pci_config_readb
, proxy
);
510 register_ioport_read(addr
, config_len
, 2, virtio_pci_config_readw
, proxy
);
511 register_ioport_read(addr
, config_len
, 4, virtio_pci_config_readl
, proxy
);
513 if (vdev
->config_len
)
514 vdev
->get_config(vdev
, vdev
->config
);
517 static void virtio_write_config(PCIDevice
*pci_dev
, uint32_t address
,
518 uint32_t val
, int len
)
520 VirtIOPCIProxy
*proxy
= DO_UPCAST(VirtIOPCIProxy
, pci_dev
, pci_dev
);
522 if (PCI_COMMAND
== address
) {
523 if (!(val
& PCI_COMMAND_MASTER
)) {
524 if (!(proxy
->flags
& VIRTIO_PCI_FLAG_BUS_MASTER_BUG
)) {
525 virtio_pci_stop_ioeventfd(proxy
);
526 virtio_set_status(proxy
->vdev
,
527 proxy
->vdev
->status
& ~VIRTIO_CONFIG_S_DRIVER_OK
);
532 pci_default_write_config(pci_dev
, address
, val
, len
);
533 msix_write_config(pci_dev
, address
, val
, len
);
536 static unsigned virtio_pci_get_features(void *opaque
)
538 VirtIOPCIProxy
*proxy
= opaque
;
539 return proxy
->host_features
;
542 static void virtio_pci_guest_notifier_read(void *opaque
)
544 VirtQueue
*vq
= opaque
;
545 EventNotifier
*n
= virtio_queue_get_guest_notifier(vq
);
546 if (event_notifier_test_and_clear(n
)) {
551 static int virtio_pci_mask_vq(PCIDevice
*dev
, unsigned vector
,
552 VirtQueue
*vq
, int masked
)
554 EventNotifier
*notifier
= virtio_queue_get_guest_notifier(vq
);
555 int r
= kvm_set_irqfd(dev
->msix_irq_entries
[vector
].gsi
,
556 event_notifier_get_fd(notifier
),
559 return (r
== -ENOSYS
) ? 0 : r
;
562 qemu_set_fd_handler(event_notifier_get_fd(notifier
),
563 virtio_pci_guest_notifier_read
, NULL
, vq
);
565 qemu_set_fd_handler(event_notifier_get_fd(notifier
),
571 static int virtio_pci_mask_notifier(PCIDevice
*dev
, unsigned vector
,
574 VirtIOPCIProxy
*proxy
= container_of(dev
, VirtIOPCIProxy
, pci_dev
);
575 VirtIODevice
*vdev
= proxy
->vdev
;
578 for (n
= 0; n
< VIRTIO_PCI_QUEUE_MAX
; n
++) {
579 if (!virtio_queue_get_num(vdev
, n
)) {
582 if (virtio_queue_vector(vdev
, n
) != vector
) {
585 r
= virtio_pci_mask_vq(dev
, vector
, virtio_get_queue(vdev
, n
), masked
);
593 if (virtio_queue_vector(vdev
, n
) != vector
) {
596 virtio_pci_mask_vq(dev
, vector
, virtio_get_queue(vdev
, n
), !masked
);
602 static int virtio_pci_set_guest_notifier(void *opaque
, int n
, bool assign
)
604 VirtIOPCIProxy
*proxy
= opaque
;
605 VirtQueue
*vq
= virtio_get_queue(proxy
->vdev
, n
);
606 EventNotifier
*notifier
= virtio_queue_get_guest_notifier(vq
);
609 int r
= event_notifier_init(notifier
, 0);
613 qemu_set_fd_handler(event_notifier_get_fd(notifier
),
614 virtio_pci_guest_notifier_read
, NULL
, vq
);
616 qemu_set_fd_handler(event_notifier_get_fd(notifier
),
618 /* Test and clear notifier before closing it,
619 * in case poll callback didn't have time to run. */
620 virtio_pci_guest_notifier_read(vq
);
621 event_notifier_cleanup(notifier
);
627 static bool virtio_pci_query_guest_notifiers(void *opaque
)
629 VirtIOPCIProxy
*proxy
= opaque
;
630 return msix_enabled(&proxy
->pci_dev
);
633 static int virtio_pci_set_guest_notifiers(void *opaque
, bool assign
)
635 VirtIOPCIProxy
*proxy
= opaque
;
636 VirtIODevice
*vdev
= proxy
->vdev
;
639 /* Must unset mask notifier while guest notifier
640 * is still assigned */
642 r
= msix_unset_mask_notifier(&proxy
->pci_dev
);
646 for (n
= 0; n
< VIRTIO_PCI_QUEUE_MAX
; n
++) {
647 if (!virtio_queue_get_num(vdev
, n
)) {
651 r
= virtio_pci_set_guest_notifier(opaque
, n
, assign
);
657 /* Must set mask notifier after guest notifier
658 * has been assigned */
660 r
= msix_set_mask_notifier(&proxy
->pci_dev
,
661 virtio_pci_mask_notifier
);
670 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
672 virtio_pci_set_guest_notifier(opaque
, n
, !assign
);
676 msix_set_mask_notifier(&proxy
->pci_dev
,
677 virtio_pci_mask_notifier
);
682 static int virtio_pci_set_host_notifier(void *opaque
, int n
, bool assign
)
684 VirtIOPCIProxy
*proxy
= opaque
;
686 /* Stop using ioeventfd for virtqueue kick if the device starts using host
687 * notifiers. This makes it easy to avoid stepping on each others' toes.
689 proxy
->ioeventfd_disabled
= assign
;
691 virtio_pci_stop_ioeventfd(proxy
);
693 /* We don't need to start here: it's not needed because backend
694 * currently only stops on status change away from ok,
695 * reset, vmstop and such. If we do add code to start here,
696 * need to check vmstate, device state etc. */
697 return virtio_pci_set_host_notifier_internal(proxy
, n
, assign
);
700 static void virtio_pci_vmstate_change(void *opaque
, bool running
)
702 VirtIOPCIProxy
*proxy
= opaque
;
705 /* Try to find out if the guest has bus master disabled, but is
706 in ready state. Then we have a buggy guest OS. */
707 if ((proxy
->vdev
->status
& VIRTIO_CONFIG_S_DRIVER_OK
) &&
708 !(proxy
->pci_dev
.config
[PCI_COMMAND
] & PCI_COMMAND_MASTER
)) {
709 proxy
->flags
|= VIRTIO_PCI_FLAG_BUS_MASTER_BUG
;
711 virtio_pci_start_ioeventfd(proxy
);
713 virtio_pci_stop_ioeventfd(proxy
);
717 static const VirtIOBindings virtio_pci_bindings
= {
718 .notify
= virtio_pci_notify
,
719 .save_config
= virtio_pci_save_config
,
720 .load_config
= virtio_pci_load_config
,
721 .save_queue
= virtio_pci_save_queue
,
722 .load_queue
= virtio_pci_load_queue
,
723 .get_features
= virtio_pci_get_features
,
724 .query_guest_notifiers
= virtio_pci_query_guest_notifiers
,
725 .set_host_notifier
= virtio_pci_set_host_notifier
,
726 .set_guest_notifiers
= virtio_pci_set_guest_notifiers
,
727 .vmstate_change
= virtio_pci_vmstate_change
,
730 void virtio_init_pci(VirtIOPCIProxy
*proxy
, VirtIODevice
*vdev
)
737 config
= proxy
->pci_dev
.config
;
739 if (proxy
->class_code
) {
740 pci_config_set_class(config
, proxy
->class_code
);
742 pci_set_word(config
+ 0x2c, pci_get_word(config
+ PCI_VENDOR_ID
));
743 pci_set_word(config
+ 0x2e, vdev
->device_id
);
746 if (vdev
->nvectors
&& !msix_init(&proxy
->pci_dev
, vdev
->nvectors
, 1, 0)) {
747 pci_register_bar(&proxy
->pci_dev
, 1,
748 msix_bar_size(&proxy
->pci_dev
),
749 PCI_BASE_ADDRESS_SPACE_MEMORY
,
754 proxy
->pci_dev
.config_write
= virtio_write_config
;
756 size
= VIRTIO_PCI_REGION_SIZE(&proxy
->pci_dev
) + vdev
->config_len
;
758 size
= 1 << qemu_fls(size
);
760 pci_register_bar(&proxy
->pci_dev
, 0, size
, PCI_BASE_ADDRESS_SPACE_IO
,
763 if (!kvm_has_many_ioeventfds()) {
764 proxy
->flags
&= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD
;
767 virtio_bind_device(vdev
, &virtio_pci_bindings
, proxy
);
768 proxy
->host_features
|= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY
;
769 proxy
->host_features
|= 0x1 << VIRTIO_F_BAD_FEATURE
;
770 proxy
->host_features
= vdev
->get_features(vdev
, proxy
->host_features
);
773 static int virtio_blk_init_pci(PCIDevice
*pci_dev
)
775 VirtIOPCIProxy
*proxy
= DO_UPCAST(VirtIOPCIProxy
, pci_dev
, pci_dev
);
778 if (proxy
->class_code
!= PCI_CLASS_STORAGE_SCSI
&&
779 proxy
->class_code
!= PCI_CLASS_STORAGE_OTHER
)
780 proxy
->class_code
= PCI_CLASS_STORAGE_SCSI
;
782 vdev
= virtio_blk_init(&pci_dev
->qdev
, &proxy
->block
);
786 vdev
->nvectors
= proxy
->nvectors
;
787 virtio_init_pci(proxy
, vdev
);
788 /* make the actual value visible */
789 proxy
->nvectors
= vdev
->nvectors
;
793 static int virtio_exit_pci(PCIDevice
*pci_dev
)
795 return msix_uninit(pci_dev
);
798 static int virtio_blk_exit_pci(PCIDevice
*pci_dev
)
800 VirtIOPCIProxy
*proxy
= DO_UPCAST(VirtIOPCIProxy
, pci_dev
, pci_dev
);
802 virtio_pci_stop_ioeventfd(proxy
);
803 virtio_blk_exit(proxy
->vdev
);
804 blockdev_mark_auto_del(proxy
->block
.bs
);
805 return virtio_exit_pci(pci_dev
);
808 static int virtio_serial_init_pci(PCIDevice
*pci_dev
)
810 VirtIOPCIProxy
*proxy
= DO_UPCAST(VirtIOPCIProxy
, pci_dev
, pci_dev
);
813 if (proxy
->class_code
!= PCI_CLASS_COMMUNICATION_OTHER
&&
814 proxy
->class_code
!= PCI_CLASS_DISPLAY_OTHER
&& /* qemu 0.10 */
815 proxy
->class_code
!= PCI_CLASS_OTHERS
) /* qemu-kvm */
816 proxy
->class_code
= PCI_CLASS_COMMUNICATION_OTHER
;
818 vdev
= virtio_serial_init(&pci_dev
->qdev
, &proxy
->serial
);
822 vdev
->nvectors
= proxy
->nvectors
== DEV_NVECTORS_UNSPECIFIED
823 ? proxy
->serial
.max_virtserial_ports
+ 1
825 virtio_init_pci(proxy
, vdev
);
826 proxy
->nvectors
= vdev
->nvectors
;
830 static int virtio_serial_exit_pci(PCIDevice
*pci_dev
)
832 VirtIOPCIProxy
*proxy
= DO_UPCAST(VirtIOPCIProxy
, pci_dev
, pci_dev
);
834 virtio_pci_stop_ioeventfd(proxy
);
835 virtio_serial_exit(proxy
->vdev
);
836 return virtio_exit_pci(pci_dev
);
839 static int virtio_net_init_pci(PCIDevice
*pci_dev
)
841 VirtIOPCIProxy
*proxy
= DO_UPCAST(VirtIOPCIProxy
, pci_dev
, pci_dev
);
844 vdev
= virtio_net_init(&pci_dev
->qdev
, &proxy
->nic
, &proxy
->net
);
846 vdev
->nvectors
= proxy
->nvectors
;
847 virtio_init_pci(proxy
, vdev
);
849 /* make the actual value visible */
850 proxy
->nvectors
= vdev
->nvectors
;
854 static int virtio_net_exit_pci(PCIDevice
*pci_dev
)
856 VirtIOPCIProxy
*proxy
= DO_UPCAST(VirtIOPCIProxy
, pci_dev
, pci_dev
);
858 virtio_pci_stop_ioeventfd(proxy
);
859 virtio_net_exit(proxy
->vdev
);
860 return virtio_exit_pci(pci_dev
);
863 static int virtio_balloon_init_pci(PCIDevice
*pci_dev
)
865 VirtIOPCIProxy
*proxy
= DO_UPCAST(VirtIOPCIProxy
, pci_dev
, pci_dev
);
868 vdev
= virtio_balloon_init(&pci_dev
->qdev
);
869 virtio_init_pci(proxy
, vdev
);
874 static int virtio_9p_init_pci(PCIDevice
*pci_dev
)
876 VirtIOPCIProxy
*proxy
= DO_UPCAST(VirtIOPCIProxy
, pci_dev
, pci_dev
);
879 vdev
= virtio_9p_init(&pci_dev
->qdev
, &proxy
->fsconf
);
880 vdev
->nvectors
= proxy
->nvectors
;
881 virtio_init_pci(proxy
, vdev
);
882 /* make the actual value visible */
883 proxy
->nvectors
= vdev
->nvectors
;
888 static PCIDeviceInfo virtio_info
[] = {
890 .qdev
.name
= "virtio-blk-pci",
891 .qdev
.alias
= "virtio-blk",
892 .qdev
.size
= sizeof(VirtIOPCIProxy
),
893 .init
= virtio_blk_init_pci
,
894 .exit
= virtio_blk_exit_pci
,
895 .vendor_id
= PCI_VENDOR_ID_REDHAT_QUMRANET
,
896 .device_id
= PCI_DEVICE_ID_VIRTIO_BLOCK
,
897 .revision
= VIRTIO_PCI_ABI_VERSION
,
898 .class_id
= PCI_CLASS_STORAGE_SCSI
,
899 .qdev
.props
= (Property
[]) {
900 DEFINE_PROP_HEX32("class", VirtIOPCIProxy
, class_code
, 0),
901 DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy
, block
),
902 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy
, flags
,
903 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT
, true),
904 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy
, nvectors
, 2),
905 DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy
, host_features
),
906 DEFINE_PROP_END_OF_LIST(),
908 .qdev
.reset
= virtio_pci_reset
,
910 .qdev
.name
= "virtio-net-pci",
911 .qdev
.alias
= "virtio-net",
912 .qdev
.size
= sizeof(VirtIOPCIProxy
),
913 .init
= virtio_net_init_pci
,
914 .exit
= virtio_net_exit_pci
,
915 .romfile
= "pxe-virtio.rom",
916 .vendor_id
= PCI_VENDOR_ID_REDHAT_QUMRANET
,
917 .device_id
= PCI_DEVICE_ID_VIRTIO_NET
,
918 .revision
= VIRTIO_PCI_ABI_VERSION
,
919 .class_id
= PCI_CLASS_NETWORK_ETHERNET
,
920 .qdev
.props
= (Property
[]) {
921 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy
, flags
,
922 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT
, false),
923 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy
, nvectors
, 3),
924 DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy
, host_features
),
925 DEFINE_NIC_PROPERTIES(VirtIOPCIProxy
, nic
),
926 DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy
,
927 net
.txtimer
, TX_TIMER_INTERVAL
),
928 DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy
,
929 net
.txburst
, TX_BURST
),
930 DEFINE_PROP_STRING("tx", VirtIOPCIProxy
, net
.tx
),
931 DEFINE_PROP_END_OF_LIST(),
933 .qdev
.reset
= virtio_pci_reset
,
935 .qdev
.name
= "virtio-serial-pci",
936 .qdev
.alias
= "virtio-serial",
937 .qdev
.size
= sizeof(VirtIOPCIProxy
),
938 .init
= virtio_serial_init_pci
,
939 .exit
= virtio_serial_exit_pci
,
940 .vendor_id
= PCI_VENDOR_ID_REDHAT_QUMRANET
,
941 .device_id
= PCI_DEVICE_ID_VIRTIO_CONSOLE
,
942 .revision
= VIRTIO_PCI_ABI_VERSION
,
943 .class_id
= PCI_CLASS_COMMUNICATION_OTHER
,
944 .qdev
.props
= (Property
[]) {
945 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy
, flags
,
946 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT
, true),
947 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy
, nvectors
,
948 DEV_NVECTORS_UNSPECIFIED
),
949 DEFINE_PROP_HEX32("class", VirtIOPCIProxy
, class_code
, 0),
950 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy
, host_features
),
951 DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy
,
952 serial
.max_virtserial_ports
, 31),
953 DEFINE_PROP_END_OF_LIST(),
955 .qdev
.reset
= virtio_pci_reset
,
957 .qdev
.name
= "virtio-balloon-pci",
958 .qdev
.alias
= "virtio-balloon",
959 .qdev
.size
= sizeof(VirtIOPCIProxy
),
960 .init
= virtio_balloon_init_pci
,
961 .exit
= virtio_exit_pci
,
962 .vendor_id
= PCI_VENDOR_ID_REDHAT_QUMRANET
,
963 .device_id
= PCI_DEVICE_ID_VIRTIO_BALLOON
,
964 .revision
= VIRTIO_PCI_ABI_VERSION
,
965 .class_id
= PCI_CLASS_MEMORY_RAM
,
966 .qdev
.props
= (Property
[]) {
967 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy
, host_features
),
968 DEFINE_PROP_END_OF_LIST(),
970 .qdev
.reset
= virtio_pci_reset
,
973 .qdev
.name
= "virtio-9p-pci",
974 .qdev
.alias
= "virtio-9p",
975 .qdev
.size
= sizeof(VirtIOPCIProxy
),
976 .init
= virtio_9p_init_pci
,
977 .vendor_id
= PCI_VENDOR_ID_REDHAT_QUMRANET
,
979 .revision
= VIRTIO_PCI_ABI_VERSION
,
981 .qdev
.props
= (Property
[]) {
982 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy
, nvectors
, 2),
983 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy
, host_features
),
984 DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy
, fsconf
.tag
),
985 DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy
, fsconf
.fsdev_id
),
986 DEFINE_PROP_END_OF_LIST(),
994 static void virtio_pci_register_devices(void)
996 pci_qdev_register_many(virtio_info
);
999 device_init(virtio_pci_register_devices
)