hw/virtio/virtio-pci.c
1 /*
2 * Virtio PCI Bindings
3 *
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2009 CodeSourcery
6 *
7 * Authors:
8 *  Anthony Liguori <aliguori@us.ibm.com>
9 *  Paul Brook <paul@codesourcery.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
13 *
14 * Contributions after 2012-01-13 are licensed under the terms of the
15 * GNU GPL, version 2 or (at your option) any later version.
16 */
18 #include "qemu/osdep.h"
20 #include "standard-headers/linux/virtio_pci.h"
21 #include "hw/virtio/virtio.h"
22 #include "hw/virtio/virtio-blk.h"
23 #include "hw/virtio/virtio-net.h"
24 #include "hw/virtio/virtio-serial.h"
25 #include "hw/virtio/virtio-scsi.h"
26 #include "hw/virtio/virtio-balloon.h"
27 #include "hw/virtio/virtio-input.h"
28 #include "hw/pci/pci.h"
29 #include "qapi/error.h"
30 #include "qemu/error-report.h"
31 #include "hw/pci/msi.h"
32 #include "hw/pci/msix.h"
33 #include "hw/loader.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/block-backend.h"
36 #include "virtio-pci.h"
37 #include "qemu/range.h"
38 #include "hw/virtio/virtio-bus.h"
39 #include "qapi/visitor.h"
41 #define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))
43 #undef VIRTIO_PCI_CONFIG
45 /* The remaining space is defined by each driver as the per-driver
46 * configuration space */
47 #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
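/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * VIRTIO_PCI_CONFIG_OFF() comes from the legacy virtio PCI header and marks
 * where the device-specific config starts inside the legacy I/O BAR. The
 * fixed registers in front of it are laid out as follows (byte offsets per
 * the legacy virtio-pci layout):
 *
 *   0x00  VIRTIO_PCI_HOST_FEATURES   (32-bit, read-only)
 *   0x04  VIRTIO_PCI_GUEST_FEATURES  (32-bit, read/write)
 *   0x08  VIRTIO_PCI_QUEUE_PFN       (32-bit)
 *   0x0c  VIRTIO_PCI_QUEUE_NUM       (16-bit, read-only)
 *   0x0e  VIRTIO_PCI_QUEUE_SEL       (16-bit)
 *   0x10  VIRTIO_PCI_QUEUE_NOTIFY    (16-bit)
 *   0x12  VIRTIO_PCI_STATUS          (8-bit)
 *   0x13  VIRTIO_PCI_ISR             (8-bit, read clears)
 *   0x14  VIRTIO_MSI_CONFIG_VECTOR   (16-bit, only when MSI-X is present)
 *   0x16  VIRTIO_MSI_QUEUE_VECTOR    (16-bit, only when MSI-X is present)
 *
 * so VIRTIO_PCI_CONFIG_OFF(msix) is 24 with MSI-X and 20 without.
 */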
49 static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
50 VirtIOPCIProxy *dev);
51 static void virtio_pci_reset(DeviceState *qdev);
53 /* virtio device */
54 /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
55 static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
56 {
57 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
58 }
60 /* DeviceState to VirtIOPCIProxy. Note: used on datapath,
61 * be careful and test performance if you change this.
62 */
63 static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
64 {
65 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
66 }
68 static void virtio_pci_notify(DeviceState *d, uint16_t vector)
69 {
70 VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
72 if (msix_enabled(&proxy->pci_dev))
73 msix_notify(&proxy->pci_dev, vector);
74 else {
75 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
76 pci_set_irq(&proxy->pci_dev, vdev->isr & 1);
77 }
78 }
80 static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
81 {
82 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
83 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
85 pci_device_save(&proxy->pci_dev, f);
86 msix_save(&proxy->pci_dev, f);
87 if (msix_present(&proxy->pci_dev))
88 qemu_put_be16(f, vdev->config_vector);
89 }
91 static void virtio_pci_load_modern_queue_state(VirtIOPCIQueue *vq,
92 QEMUFile *f)
94 vq->num = qemu_get_be16(f);
95 vq->enabled = qemu_get_be16(f);
96 vq->desc[0] = qemu_get_be32(f);
97 vq->desc[1] = qemu_get_be32(f);
98 vq->avail[0] = qemu_get_be32(f);
99 vq->avail[1] = qemu_get_be32(f);
100 vq->used[0] = qemu_get_be32(f);
101 vq->used[1] = qemu_get_be32(f);
104 static bool virtio_pci_has_extra_state(DeviceState *d)
106 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
108 return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
111 static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
113 VirtIOPCIProxy *proxy = pv;
114 int i;
116 proxy->dfselect = qemu_get_be32(f);
117 proxy->gfselect = qemu_get_be32(f);
118 proxy->guest_features[0] = qemu_get_be32(f);
119 proxy->guest_features[1] = qemu_get_be32(f);
120 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
121 virtio_pci_load_modern_queue_state(&proxy->vqs[i], f);
124 return 0;
127 static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq,
128 QEMUFile *f)
130 qemu_put_be16(f, vq->num);
131 qemu_put_be16(f, vq->enabled);
132 qemu_put_be32(f, vq->desc[0]);
133 qemu_put_be32(f, vq->desc[1]);
134 qemu_put_be32(f, vq->avail[0]);
135 qemu_put_be32(f, vq->avail[1]);
136 qemu_put_be32(f, vq->used[0]);
137 qemu_put_be32(f, vq->used[1]);
140 static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
142 VirtIOPCIProxy *proxy = pv;
143 int i;
145 qemu_put_be32(f, proxy->dfselect);
146 qemu_put_be32(f, proxy->gfselect);
147 qemu_put_be32(f, proxy->guest_features[0]);
148 qemu_put_be32(f, proxy->guest_features[1]);
149 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
150 virtio_pci_save_modern_queue_state(&proxy->vqs[i], f);
154 static const VMStateInfo vmstate_info_virtio_pci_modern_state = {
155 .name = "virtqueue_state",
156 .get = get_virtio_pci_modern_state,
157 .put = put_virtio_pci_modern_state,
160 static bool virtio_pci_modern_state_needed(void *opaque)
162 VirtIOPCIProxy *proxy = opaque;
164 return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
167 static const VMStateDescription vmstate_virtio_pci_modern_state = {
168 .name = "virtio_pci/modern_state",
169 .version_id = 1,
170 .minimum_version_id = 1,
171 .needed = &virtio_pci_modern_state_needed,
172 .fields = (VMStateField[]) {
174 .name = "modern_state",
175 .version_id = 0,
176 .field_exists = NULL,
177 .size = 0,
178 .info = &vmstate_info_virtio_pci_modern_state,
179 .flags = VMS_SINGLE,
180 .offset = 0,
182 VMSTATE_END_OF_LIST()
186 static const VMStateDescription vmstate_virtio_pci = {
187 .name = "virtio_pci",
188 .version_id = 1,
189 .minimum_version_id = 1,
190 .minimum_version_id_old = 1,
191 .fields = (VMStateField[]) {
192 VMSTATE_END_OF_LIST()
194 .subsections = (const VMStateDescription*[]) {
195 &vmstate_virtio_pci_modern_state,
196 NULL
200 static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
202 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
204 vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
207 static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
209 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
211 return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
214 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
216 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
217 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
219 if (msix_present(&proxy->pci_dev))
220 qemu_put_be16(f, virtio_queue_vector(vdev, n));
223 static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
225 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
226 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
228 int ret;
229 ret = pci_device_load(&proxy->pci_dev, f);
230 if (ret) {
231 return ret;
233 msix_unuse_all_vectors(&proxy->pci_dev);
234 msix_load(&proxy->pci_dev, f);
235 if (msix_present(&proxy->pci_dev)) {
236 qemu_get_be16s(f, &vdev->config_vector);
237 } else {
238 vdev->config_vector = VIRTIO_NO_VECTOR;
240 if (vdev->config_vector != VIRTIO_NO_VECTOR) {
241 return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
243 return 0;
246 static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
248 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
249 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
251 uint16_t vector;
252 if (msix_present(&proxy->pci_dev)) {
253 qemu_get_be16s(f, &vector);
254 } else {
255 vector = VIRTIO_NO_VECTOR;
257 virtio_queue_set_vector(vdev, n, vector);
258 if (vector != VIRTIO_NO_VECTOR) {
259 return msix_vector_use(&proxy->pci_dev, vector);
262 return 0;
265 #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
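/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * In the modern (virtio 1.0) layout each virtqueue gets its own notify slot
 * inside the notify capability region, spaced QEMU_VIRTIO_PCI_QUEUE_MEM_MULT
 * bytes apart, e.g.:
 *
 *     hwaddr notify_addr = proxy->notify.offset
 *                        + queue_index * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT;
 *
 * This matches the notify_off_multiplier advertised in the notify capability
 * further down, and gives every queue a page of its own so that an ioeventfd
 * can be bound per queue.
 */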
267 static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
268 int n, bool assign, bool set_handler)
270 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
271 VirtQueue *vq = virtio_get_queue(vdev, n);
272 EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
273 bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
274 bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
275 bool fast_mmio = kvm_ioeventfd_any_length_enabled();
276 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
277 MemoryRegion *modern_mr = &proxy->notify.mr;
278 MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
279 MemoryRegion *legacy_mr = &proxy->bar;
280 hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
281 virtio_get_queue_index(vq);
282 hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
283 int r = 0;
285 if (assign) {
286 r = event_notifier_init(notifier, 1);
287 if (r < 0) {
288 error_report("%s: unable to init event notifier: %d",
289 __func__, r);
290 return r;
292 virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
293 if (modern) {
294 if (fast_mmio) {
295 memory_region_add_eventfd(modern_mr, modern_addr, 0,
296 false, n, notifier);
297 } else {
298 memory_region_add_eventfd(modern_mr, modern_addr, 2,
299 false, n, notifier);
301 if (modern_pio) {
302 memory_region_add_eventfd(modern_notify_mr, 0, 2,
303 true, n, notifier);
306 if (legacy) {
307 memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
308 true, n, notifier);
310 } else {
311 if (modern) {
312 if (fast_mmio) {
313 memory_region_del_eventfd(modern_mr, modern_addr, 0,
314 false, n, notifier);
315 } else {
316 memory_region_del_eventfd(modern_mr, modern_addr, 2,
317 false, n, notifier);
319 if (modern_pio) {
320 memory_region_del_eventfd(modern_notify_mr, 0, 2,
321 true, n, notifier);
324 if (legacy) {
325 memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
326 true, n, notifier);
328 virtio_queue_set_host_notifier_fd_handler(vq, false, false);
329 event_notifier_cleanup(notifier);
331 return r;
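/*
 * Editor's note -- explanatory summary, not part of the original file.
 * The eventfd registered above means a guest write to the queue's notify
 * address is completed in the kernel (KVM signals the eventfd) instead of
 * trapping out to this device model for every kick; with
 * kvm_ioeventfd_any_length_enabled() the modern MMIO doorbell is matched
 * regardless of access size ("fast mmio"). virtio_pci_start_ioeventfd() and
 * virtio_pci_stop_ioeventfd() below toggle this wiring for all queues.
 */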
334 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
336 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
337 int n, r;
339 if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
340 proxy->ioeventfd_disabled ||
341 proxy->ioeventfd_started) {
342 return;
345 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
346 if (!virtio_queue_get_num(vdev, n)) {
347 continue;
350 r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
351 if (r < 0) {
352 goto assign_error;
355 proxy->ioeventfd_started = true;
356 return;
358 assign_error:
359 while (--n >= 0) {
360 if (!virtio_queue_get_num(vdev, n)) {
361 continue;
364 r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
365 assert(r >= 0);
367 proxy->ioeventfd_started = false;
368 error_report("%s: failed. Falling back to userspace (slower).", __func__);
371 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
373 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
374 int r;
375 int n;
377 if (!proxy->ioeventfd_started) {
378 return;
381 for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
382 if (!virtio_queue_get_num(vdev, n)) {
383 continue;
386 r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
387 assert(r >= 0);
389 proxy->ioeventfd_started = false;
392 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
394 VirtIOPCIProxy *proxy = opaque;
395 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
396 hwaddr pa;
398 switch (addr) {
399 case VIRTIO_PCI_GUEST_FEATURES:
400 /* Guest does not negotiate properly? We have to assume nothing. */
401 if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
402 val = virtio_bus_get_vdev_bad_features(&proxy->bus);
404 virtio_set_features(vdev, val);
405 break;
406 case VIRTIO_PCI_QUEUE_PFN:
407 pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
408 if (pa == 0) {
409 virtio_pci_reset(DEVICE(proxy));
411 else
412 virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
413 break;
414 case VIRTIO_PCI_QUEUE_SEL:
415 if (val < VIRTIO_QUEUE_MAX)
416 vdev->queue_sel = val;
417 break;
418 case VIRTIO_PCI_QUEUE_NOTIFY:
419 if (val < VIRTIO_QUEUE_MAX) {
420 virtio_queue_notify(vdev, val);
422 break;
423 case VIRTIO_PCI_STATUS:
424 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
425 virtio_pci_stop_ioeventfd(proxy);
428 virtio_set_status(vdev, val & 0xFF);
430 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
431 virtio_pci_start_ioeventfd(proxy);
434 if (vdev->status == 0) {
435 virtio_pci_reset(DEVICE(proxy));
438 /* Linux before 2.6.34 drives the device without enabling
439 the PCI device bus master bit. Enable it automatically
440 for the guest. This is a PCI spec violation but so is
441 initiating DMA with bus master bit clear. */
442 if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
443 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
444 proxy->pci_dev.config[PCI_COMMAND] |
445 PCI_COMMAND_MASTER, 1);
447 break;
448 case VIRTIO_MSI_CONFIG_VECTOR:
449 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
450 /* Make it possible for the guest to discover that an error took place. */
451 if (msix_vector_use(&proxy->pci_dev, val) < 0)
452 val = VIRTIO_NO_VECTOR;
453 vdev->config_vector = val;
454 break;
455 case VIRTIO_MSI_QUEUE_VECTOR:
456 msix_vector_unuse(&proxy->pci_dev,
457 virtio_queue_vector(vdev, vdev->queue_sel));
458 /* Make it possible for the guest to discover that an error took place. */
459 if (msix_vector_use(&proxy->pci_dev, val) < 0)
460 val = VIRTIO_NO_VECTOR;
461 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
462 break;
463 default:
464 error_report("%s: unexpected address 0x%x value 0x%x",
465 __func__, addr, val);
466 break;
470 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
472 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
473 uint32_t ret = 0xFFFFFFFF;
475 switch (addr) {
476 case VIRTIO_PCI_HOST_FEATURES:
477 ret = vdev->host_features;
478 break;
479 case VIRTIO_PCI_GUEST_FEATURES:
480 ret = vdev->guest_features;
481 break;
482 case VIRTIO_PCI_QUEUE_PFN:
483 ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
484 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
485 break;
486 case VIRTIO_PCI_QUEUE_NUM:
487 ret = virtio_queue_get_num(vdev, vdev->queue_sel);
488 break;
489 case VIRTIO_PCI_QUEUE_SEL:
490 ret = vdev->queue_sel;
491 break;
492 case VIRTIO_PCI_STATUS:
493 ret = vdev->status;
494 break;
495 case VIRTIO_PCI_ISR:
496 /* reading from the ISR also clears it. */
497 ret = vdev->isr;
498 vdev->isr = 0;
499 pci_irq_deassert(&proxy->pci_dev);
500 break;
501 case VIRTIO_MSI_CONFIG_VECTOR:
502 ret = vdev->config_vector;
503 break;
504 case VIRTIO_MSI_QUEUE_VECTOR:
505 ret = virtio_queue_vector(vdev, vdev->queue_sel);
506 break;
507 default:
508 break;
511 return ret;
514 static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
515 unsigned size)
517 VirtIOPCIProxy *proxy = opaque;
518 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
519 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
520 uint64_t val = 0;
521 if (addr < config) {
522 return virtio_ioport_read(proxy, addr);
524 addr -= config;
526 switch (size) {
527 case 1:
528 val = virtio_config_readb(vdev, addr);
529 break;
530 case 2:
531 val = virtio_config_readw(vdev, addr);
532 if (virtio_is_big_endian(vdev)) {
533 val = bswap16(val);
535 break;
536 case 4:
537 val = virtio_config_readl(vdev, addr);
538 if (virtio_is_big_endian(vdev)) {
539 val = bswap32(val);
541 break;
543 return val;
546 static void virtio_pci_config_write(void *opaque, hwaddr addr,
547 uint64_t val, unsigned size)
549 VirtIOPCIProxy *proxy = opaque;
550 uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
551 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
552 if (addr < config) {
553 virtio_ioport_write(proxy, addr, val);
554 return;
556 addr -= config;
558 * Virtio-PCI is odd. Ioports are LE but config space is target native
559 * endian.
561 switch (size) {
562 case 1:
563 virtio_config_writeb(vdev, addr, val);
564 break;
565 case 2:
566 if (virtio_is_big_endian(vdev)) {
567 val = bswap16(val);
569 virtio_config_writew(vdev, addr, val);
570 break;
571 case 4:
572 if (virtio_is_big_endian(vdev)) {
573 val = bswap32(val);
575 virtio_config_writel(vdev, addr, val);
576 break;
580 static const MemoryRegionOps virtio_pci_config_ops = {
581 .read = virtio_pci_config_read,
582 .write = virtio_pci_config_write,
583 .impl = {
584 .min_access_size = 1,
585 .max_access_size = 4,
587 .endianness = DEVICE_LITTLE_ENDIAN,
590 /* Below are generic functions to do memcpy from/to an address space,
591 * without byteswaps, with input validation.
593 * As regular address_space_* APIs all do some kind of byteswap at least for
594 * some host/target combinations, we are forced to explicitly convert to a
595 * known-endianness integer value.
596 * It doesn't really matter which endian format to go through, so the code
597 * below selects the endian that causes the least amount of work on the given
598 * host.
600 * Note: host pointer must be aligned.
602 static
603 void virtio_address_space_write(AddressSpace *as, hwaddr addr,
604 const uint8_t *buf, int len)
606 uint32_t val;
608 /* address_space_* APIs assume an aligned address.
609 * As address is under guest control, handle illegal values.
611 addr &= ~(len - 1);
613 /* Make sure caller aligned buf properly */
614 assert(!(((uintptr_t)buf) & (len - 1)));
616 switch (len) {
617 case 1:
618 val = pci_get_byte(buf);
619 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
620 break;
621 case 2:
622 val = pci_get_word(buf);
623 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
624 break;
625 case 4:
626 val = pci_get_long(buf);
627 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
628 break;
629 default:
630 /* As length is under guest control, handle illegal values. */
631 break;
635 static void
636 virtio_address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
638 uint32_t val;
640 /* address_space_* APIs assume an aligned address.
641 * As address is under guest control, handle illegal values.
643 addr &= ~(len - 1);
645 /* Make sure caller aligned buf properly */
646 assert(!(((uintptr_t)buf) & (len - 1)));
648 switch (len) {
649 case 1:
650 val = address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
651 pci_set_byte(buf, val);
652 break;
653 case 2:
654 val = address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
655 pci_set_word(buf, val);
656 break;
657 case 4:
658 val = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
659 pci_set_long(buf, val);
660 break;
661 default:
662 /* As length is under guest control, handle illegal values. */
663 break;
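/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The two helpers above back the VIRTIO_PCI_CAP_PCI_CFG window handled in
 * virtio_write_config()/virtio_read_config() below. A guest that cannot map
 * the modern BAR can still reach it through PCI config space, roughly:
 *
 *     1. write cap.offset and cap.length in the vendor capability (the
 *        writable mask for these fields is set up in
 *        virtio_pci_device_plugged());
 *     2. read or write cap.pci_cfg_data;
 *     3. QEMU forwards that access into proxy->modern_as, the alias of the
 *        modern memory BAR, using the helpers above.
 *
 * Only lengths of 1, 2 or 4 are forwarded, and the address is aligned down,
 * since both offset and length are guest controlled.
 */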
667 static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
668 uint32_t val, int len)
670 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
671 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
672 struct virtio_pci_cfg_cap *cfg;
674 pci_default_write_config(pci_dev, address, val, len);
676 if (range_covers_byte(address, len, PCI_COMMAND) &&
677 !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
678 virtio_pci_stop_ioeventfd(proxy);
679 virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
682 if (proxy->config_cap &&
683 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
684 pci_cfg_data),
685 sizeof cfg->pci_cfg_data)) {
686 uint32_t off;
687 uint32_t len;
689 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
690 off = le32_to_cpu(cfg->cap.offset);
691 len = le32_to_cpu(cfg->cap.length);
693 if (len == 1 || len == 2 || len == 4) {
694 assert(len <= sizeof cfg->pci_cfg_data);
695 virtio_address_space_write(&proxy->modern_as, off,
696 cfg->pci_cfg_data, len);
701 static uint32_t virtio_read_config(PCIDevice *pci_dev,
702 uint32_t address, int len)
704 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
705 struct virtio_pci_cfg_cap *cfg;
707 if (proxy->config_cap &&
708 ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
709 pci_cfg_data),
710 sizeof cfg->pci_cfg_data)) {
711 uint32_t off;
712 uint32_t len;
714 cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
715 off = le32_to_cpu(cfg->cap.offset);
716 len = le32_to_cpu(cfg->cap.length);
718 if (len == 1 || len == 2 || len == 4) {
719 assert(len <= sizeof cfg->pci_cfg_data);
720 virtio_address_space_read(&proxy->modern_as, off,
721 cfg->pci_cfg_data, len);
725 return pci_default_read_config(pci_dev, address, len);
728 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
729 unsigned int queue_no,
730 unsigned int vector,
731 MSIMessage msg)
733 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
734 int ret;
736 if (irqfd->users == 0) {
737 ret = kvm_irqchip_add_msi_route(kvm_state, msg, &proxy->pci_dev);
738 if (ret < 0) {
739 return ret;
741 irqfd->virq = ret;
743 irqfd->users++;
744 return 0;
747 static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
748 unsigned int vector)
750 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
751 if (--irqfd->users == 0) {
752 kvm_irqchip_release_virq(kvm_state, irqfd->virq);
756 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
757 unsigned int queue_no,
758 unsigned int vector)
760 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
761 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
762 VirtQueue *vq = virtio_get_queue(vdev, queue_no);
763 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
764 int ret;
765 ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
766 return ret;
769 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
770 unsigned int queue_no,
771 unsigned int vector)
773 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
774 VirtQueue *vq = virtio_get_queue(vdev, queue_no);
775 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
776 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
777 int ret;
779 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
780 assert(ret == 0);
783 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
785 PCIDevice *dev = &proxy->pci_dev;
786 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
787 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
788 unsigned int vector;
789 int ret, queue_no;
790 MSIMessage msg;
792 for (queue_no = 0; queue_no < nvqs; queue_no++) {
793 if (!virtio_queue_get_num(vdev, queue_no)) {
794 break;
796 vector = virtio_queue_vector(vdev, queue_no);
797 if (vector >= msix_nr_vectors_allocated(dev)) {
798 continue;
800 msg = msix_get_message(dev, vector);
801 ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
802 if (ret < 0) {
803 goto undo;
805 /* If guest supports masking, set up irqfd now.
806 * Otherwise, delay until unmasked in the frontend.
808 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
809 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
810 if (ret < 0) {
811 kvm_virtio_pci_vq_vector_release(proxy, vector);
812 goto undo;
816 return 0;
818 undo:
819 while (--queue_no >= 0) {
820 vector = virtio_queue_vector(vdev, queue_no);
821 if (vector >= msix_nr_vectors_allocated(dev)) {
822 continue;
824 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
825 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
827 kvm_virtio_pci_vq_vector_release(proxy, vector);
829 return ret;
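/*
 * Editor's note -- explanatory summary, not part of the original file.
 * Per queue the flow above is: look up the queue's MSI-X vector, fetch its
 * MSIMessage, install (or reuse) a KVM MSI route for that vector -- the
 * route is reference counted in proxy->vector_irqfd[vector].users -- and,
 * if the device can mask guest notifiers itself, immediately attach the
 * queue's guest notifier eventfd to that route as an irqfd. Interrupts for
 * the queue are then injected by KVM without returning to userspace.
 */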
832 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
834 PCIDevice *dev = &proxy->pci_dev;
835 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
836 unsigned int vector;
837 int queue_no;
838 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
840 for (queue_no = 0; queue_no < nvqs; queue_no++) {
841 if (!virtio_queue_get_num(vdev, queue_no)) {
842 break;
844 vector = virtio_queue_vector(vdev, queue_no);
845 if (vector >= msix_nr_vectors_allocated(dev)) {
846 continue;
848 /* If guest supports masking, clean up irqfd now.
849 * Otherwise, it was cleaned when masked in the frontend.
851 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
852 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
854 kvm_virtio_pci_vq_vector_release(proxy, vector);
858 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
859 unsigned int queue_no,
860 unsigned int vector,
861 MSIMessage msg)
863 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
864 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
865 VirtQueue *vq = virtio_get_queue(vdev, queue_no);
866 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
867 VirtIOIRQFD *irqfd;
868 int ret = 0;
870 if (proxy->vector_irqfd) {
871 irqfd = &proxy->vector_irqfd[vector];
872 if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
873 ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
874 &proxy->pci_dev);
875 if (ret < 0) {
876 return ret;
881 /* If guest supports masking, irqfd is already setup, unmask it.
882 * Otherwise, set it up now.
884 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
885 k->guest_notifier_mask(vdev, queue_no, false);
886 /* Test after unmasking to avoid losing events. */
887 if (k->guest_notifier_pending &&
888 k->guest_notifier_pending(vdev, queue_no)) {
889 event_notifier_set(n);
891 } else {
892 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
894 return ret;
897 static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
898 unsigned int queue_no,
899 unsigned int vector)
901 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
902 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
904 /* If guest supports masking, keep irqfd but mask it.
905 * Otherwise, clean it up now.
907 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
908 k->guest_notifier_mask(vdev, queue_no, true);
909 } else {
910 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
914 static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
915 MSIMessage msg)
917 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
918 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
919 VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
920 int ret, index, unmasked = 0;
922 while (vq) {
923 index = virtio_get_queue_index(vq);
924 if (!virtio_queue_get_num(vdev, index)) {
925 break;
927 if (index < proxy->nvqs_with_notifiers) {
928 ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
929 if (ret < 0) {
930 goto undo;
932 ++unmasked;
934 vq = virtio_vector_next_queue(vq);
937 return 0;
939 undo:
940 vq = virtio_vector_first_queue(vdev, vector);
941 while (vq && unmasked >= 0) {
942 index = virtio_get_queue_index(vq);
943 if (index < proxy->nvqs_with_notifiers) {
944 virtio_pci_vq_vector_mask(proxy, index, vector);
945 --unmasked;
947 vq = virtio_vector_next_queue(vq);
949 return ret;
952 static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
954 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
955 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
956 VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
957 int index;
959 while (vq) {
960 index = virtio_get_queue_index(vq);
961 if (!virtio_queue_get_num(vdev, index)) {
962 break;
964 if (index < proxy->nvqs_with_notifiers) {
965 virtio_pci_vq_vector_mask(proxy, index, vector);
967 vq = virtio_vector_next_queue(vq);
971 static void virtio_pci_vector_poll(PCIDevice *dev,
972 unsigned int vector_start,
973 unsigned int vector_end)
975 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
976 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
977 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
978 int queue_no;
979 unsigned int vector;
980 EventNotifier *notifier;
981 VirtQueue *vq;
983 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
984 if (!virtio_queue_get_num(vdev, queue_no)) {
985 break;
987 vector = virtio_queue_vector(vdev, queue_no);
988 if (vector < vector_start || vector >= vector_end ||
989 !msix_is_masked(dev, vector)) {
990 continue;
992 vq = virtio_get_queue(vdev, queue_no);
993 notifier = virtio_queue_get_guest_notifier(vq);
994 if (k->guest_notifier_pending) {
995 if (k->guest_notifier_pending(vdev, queue_no)) {
996 msix_set_pending(dev, vector);
998 } else if (event_notifier_test_and_clear(notifier)) {
999 msix_set_pending(dev, vector);
1004 static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
1005 bool with_irqfd)
1007 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
1008 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1009 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
1010 VirtQueue *vq = virtio_get_queue(vdev, n);
1011 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
1013 if (assign) {
1014 int r = event_notifier_init(notifier, 0);
1015 if (r < 0) {
1016 return r;
1018 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
1019 } else {
1020 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
1021 event_notifier_cleanup(notifier);
1024 if (!msix_enabled(&proxy->pci_dev) &&
1025 vdev->use_guest_notifier_mask &&
1026 vdc->guest_notifier_mask) {
1027 vdc->guest_notifier_mask(vdev, n, !assign);
1030 return 0;
1033 static bool virtio_pci_query_guest_notifiers(DeviceState *d)
1035 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
1036 return msix_enabled(&proxy->pci_dev);
1039 static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
1041 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
1042 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1043 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1044 int r, n;
1045 bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
1046 kvm_msi_via_irqfd_enabled();
1048 nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);
1050 /* When deassigning, pass a consistent nvqs value
1051 * to avoid leaking notifiers.
1053 assert(assign || nvqs == proxy->nvqs_with_notifiers);
1055 proxy->nvqs_with_notifiers = nvqs;
1057 /* Must unset vector notifier while guest notifier is still assigned */
1058 if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
1059 msix_unset_vector_notifiers(&proxy->pci_dev);
1060 if (proxy->vector_irqfd) {
1061 kvm_virtio_pci_vector_release(proxy, nvqs);
1062 g_free(proxy->vector_irqfd);
1063 proxy->vector_irqfd = NULL;
1067 for (n = 0; n < nvqs; n++) {
1068 if (!virtio_queue_get_num(vdev, n)) {
1069 break;
1072 r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
1073 if (r < 0) {
1074 goto assign_error;
1078 /* Must set vector notifier after guest notifier has been assigned */
1079 if ((with_irqfd || k->guest_notifier_mask) && assign) {
1080 if (with_irqfd) {
1081 proxy->vector_irqfd =
1082 g_malloc0(sizeof(*proxy->vector_irqfd) *
1083 msix_nr_vectors_allocated(&proxy->pci_dev));
1084 r = kvm_virtio_pci_vector_use(proxy, nvqs);
1085 if (r < 0) {
1086 goto assign_error;
1089 r = msix_set_vector_notifiers(&proxy->pci_dev,
1090 virtio_pci_vector_unmask,
1091 virtio_pci_vector_mask,
1092 virtio_pci_vector_poll);
1093 if (r < 0) {
1094 goto notifiers_error;
1098 return 0;
1100 notifiers_error:
1101 if (with_irqfd) {
1102 assert(assign);
1103 kvm_virtio_pci_vector_release(proxy, nvqs);
1106 assign_error:
1107 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
1108 assert(assign);
1109 while (--n >= 0) {
1110 virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
1112 return r;
1115 static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
1117 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
1119 /* Stop using ioeventfd for virtqueue kick if the device starts using host
1120 * notifiers. This makes it easy to avoid stepping on each other's toes.
1122 proxy->ioeventfd_disabled = assign;
1123 if (assign) {
1124 virtio_pci_stop_ioeventfd(proxy);
1126 /* We don't need to start here: it's not needed because backend
1127 * currently only stops on status change away from ok,
1128 * reset, vmstop and such. If we do add code to start here,
1129 * need to check vmstate, device state etc. */
1130 return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
1133 static void virtio_pci_vmstate_change(DeviceState *d, bool running)
1135 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
1136 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1138 if (running) {
1139 /* Old QEMU versions did not set bus master enable on status write.
1140 * Detect DRIVER set and enable it.
1142 if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
1143 (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
1144 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
1145 pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
1146 proxy->pci_dev.config[PCI_COMMAND] |
1147 PCI_COMMAND_MASTER, 1);
1149 virtio_pci_start_ioeventfd(proxy);
1150 } else {
1151 virtio_pci_stop_ioeventfd(proxy);
1155 #ifdef CONFIG_VIRTFS
1156 static void virtio_9p_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
1158 V9fsPCIState *dev = VIRTIO_9P_PCI(vpci_dev);
1159 DeviceState *vdev = DEVICE(&dev->vdev);
1161 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1162 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
1165 static Property virtio_9p_pci_properties[] = {
1166 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1167 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1168 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1169 DEFINE_PROP_END_OF_LIST(),
1172 static void virtio_9p_pci_class_init(ObjectClass *klass, void *data)
1174 DeviceClass *dc = DEVICE_CLASS(klass);
1175 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1176 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1178 k->realize = virtio_9p_pci_realize;
1179 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1180 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
1181 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1182 pcidev_k->class_id = 0x2;
1183 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1184 dc->props = virtio_9p_pci_properties;
1187 static void virtio_9p_pci_instance_init(Object *obj)
1189 V9fsPCIState *dev = VIRTIO_9P_PCI(obj);
1191 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
1192 TYPE_VIRTIO_9P);
1195 static const TypeInfo virtio_9p_pci_info = {
1196 .name = TYPE_VIRTIO_9P_PCI,
1197 .parent = TYPE_VIRTIO_PCI,
1198 .instance_size = sizeof(V9fsPCIState),
1199 .instance_init = virtio_9p_pci_instance_init,
1200 .class_init = virtio_9p_pci_class_init,
1202 #endif /* CONFIG_VIRTFS */
1205 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
1208 static int virtio_pci_query_nvectors(DeviceState *d)
1210 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1212 return proxy->nvectors;
1215 static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
1216 struct virtio_pci_cap *cap)
1218 PCIDevice *dev = &proxy->pci_dev;
1219 int offset;
1221 offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, cap->cap_len);
1222 assert(offset > 0);
1224 assert(cap->cap_len >= sizeof *cap);
1225 memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
1226 cap->cap_len - PCI_CAP_FLAGS);
1228 return offset;
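/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The vendor capability copied above follows the virtio 1.0 layout from the
 * standard headers (multi-byte fields little endian):
 *
 *     struct virtio_pci_cap {
 *         uint8_t cap_vndr;    // PCI_CAP_ID_VNDR, filled by pci_add_capability()
 *         uint8_t cap_next;    // chained by pci_add_capability()
 *         uint8_t cap_len;     // length of this capability
 *         uint8_t cfg_type;    // VIRTIO_PCI_CAP_*_CFG
 *         uint8_t bar;         // which BAR holds the region
 *         uint8_t padding[3];
 *         uint32_t offset;     // offset of the region within the BAR
 *         uint32_t length;     // length of the region
 *     };
 *
 * The memcpy() starts at PCI_CAP_FLAGS (offset 2) because the first two
 * bytes, capability ID and next pointer, are owned by pci_add_capability().
 */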
1231 static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
1232 unsigned size)
1234 VirtIOPCIProxy *proxy = opaque;
1235 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1236 uint32_t val = 0;
1237 int i;
1239 switch (addr) {
1240 case VIRTIO_PCI_COMMON_DFSELECT:
1241 val = proxy->dfselect;
1242 break;
1243 case VIRTIO_PCI_COMMON_DF:
1244 if (proxy->dfselect <= 1) {
1245 val = (vdev->host_features & ~VIRTIO_LEGACY_FEATURES) >>
1246 (32 * proxy->dfselect);
1248 break;
1249 case VIRTIO_PCI_COMMON_GFSELECT:
1250 val = proxy->gfselect;
1251 break;
1252 case VIRTIO_PCI_COMMON_GF:
1253 if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
1254 val = proxy->guest_features[proxy->gfselect];
1256 break;
1257 case VIRTIO_PCI_COMMON_MSIX:
1258 val = vdev->config_vector;
1259 break;
1260 case VIRTIO_PCI_COMMON_NUMQ:
1261 for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
1262 if (virtio_queue_get_num(vdev, i)) {
1263 val = i + 1;
1266 break;
1267 case VIRTIO_PCI_COMMON_STATUS:
1268 val = vdev->status;
1269 break;
1270 case VIRTIO_PCI_COMMON_CFGGENERATION:
1271 val = vdev->generation;
1272 break;
1273 case VIRTIO_PCI_COMMON_Q_SELECT:
1274 val = vdev->queue_sel;
1275 break;
1276 case VIRTIO_PCI_COMMON_Q_SIZE:
1277 val = virtio_queue_get_num(vdev, vdev->queue_sel);
1278 break;
1279 case VIRTIO_PCI_COMMON_Q_MSIX:
1280 val = virtio_queue_vector(vdev, vdev->queue_sel);
1281 break;
1282 case VIRTIO_PCI_COMMON_Q_ENABLE:
1283 val = proxy->vqs[vdev->queue_sel].enabled;
1284 break;
1285 case VIRTIO_PCI_COMMON_Q_NOFF:
1286 /* Simply map queues in order */
1287 val = vdev->queue_sel;
1288 break;
1289 case VIRTIO_PCI_COMMON_Q_DESCLO:
1290 val = proxy->vqs[vdev->queue_sel].desc[0];
1291 break;
1292 case VIRTIO_PCI_COMMON_Q_DESCHI:
1293 val = proxy->vqs[vdev->queue_sel].desc[1];
1294 break;
1295 case VIRTIO_PCI_COMMON_Q_AVAILLO:
1296 val = proxy->vqs[vdev->queue_sel].avail[0];
1297 break;
1298 case VIRTIO_PCI_COMMON_Q_AVAILHI:
1299 val = proxy->vqs[vdev->queue_sel].avail[1];
1300 break;
1301 case VIRTIO_PCI_COMMON_Q_USEDLO:
1302 val = proxy->vqs[vdev->queue_sel].used[0];
1303 break;
1304 case VIRTIO_PCI_COMMON_Q_USEDHI:
1305 val = proxy->vqs[vdev->queue_sel].used[1];
1306 break;
1307 default:
1308 val = 0;
1311 return val;
1314 static void virtio_pci_common_write(void *opaque, hwaddr addr,
1315 uint64_t val, unsigned size)
1317 VirtIOPCIProxy *proxy = opaque;
1318 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1320 switch (addr) {
1321 case VIRTIO_PCI_COMMON_DFSELECT:
1322 proxy->dfselect = val;
1323 break;
1324 case VIRTIO_PCI_COMMON_GFSELECT:
1325 proxy->gfselect = val;
1326 break;
1327 case VIRTIO_PCI_COMMON_GF:
1328 if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
1329 proxy->guest_features[proxy->gfselect] = val;
1330 virtio_set_features(vdev,
1331 (((uint64_t)proxy->guest_features[1]) << 32) |
1332 proxy->guest_features[0]);
1334 break;
1335 case VIRTIO_PCI_COMMON_MSIX:
1336 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
1337 /* Make it possible for the guest to discover that an error took place. */
1338 if (msix_vector_use(&proxy->pci_dev, val) < 0) {
1339 val = VIRTIO_NO_VECTOR;
1341 vdev->config_vector = val;
1342 break;
1343 case VIRTIO_PCI_COMMON_STATUS:
1344 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
1345 virtio_pci_stop_ioeventfd(proxy);
1348 virtio_set_status(vdev, val & 0xFF);
1350 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
1351 virtio_pci_start_ioeventfd(proxy);
1354 if (vdev->status == 0) {
1355 virtio_pci_reset(DEVICE(proxy));
1358 break;
1359 case VIRTIO_PCI_COMMON_Q_SELECT:
1360 if (val < VIRTIO_QUEUE_MAX) {
1361 vdev->queue_sel = val;
1363 break;
1364 case VIRTIO_PCI_COMMON_Q_SIZE:
1365 proxy->vqs[vdev->queue_sel].num = val;
1366 break;
1367 case VIRTIO_PCI_COMMON_Q_MSIX:
1368 msix_vector_unuse(&proxy->pci_dev,
1369 virtio_queue_vector(vdev, vdev->queue_sel));
1370 /* Make it possible for the guest to discover that an error took place. */
1371 if (msix_vector_use(&proxy->pci_dev, val) < 0) {
1372 val = VIRTIO_NO_VECTOR;
1374 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
1375 break;
1376 case VIRTIO_PCI_COMMON_Q_ENABLE:
1377 /* TODO: need a way to put num back on reset. */
1378 virtio_queue_set_num(vdev, vdev->queue_sel,
1379 proxy->vqs[vdev->queue_sel].num);
1380 virtio_queue_set_rings(vdev, vdev->queue_sel,
1381 ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
1382 proxy->vqs[vdev->queue_sel].desc[0],
1383 ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
1384 proxy->vqs[vdev->queue_sel].avail[0],
1385 ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
1386 proxy->vqs[vdev->queue_sel].used[0]);
1387 proxy->vqs[vdev->queue_sel].enabled = 1;
1388 break;
1389 case VIRTIO_PCI_COMMON_Q_DESCLO:
1390 proxy->vqs[vdev->queue_sel].desc[0] = val;
1391 break;
1392 case VIRTIO_PCI_COMMON_Q_DESCHI:
1393 proxy->vqs[vdev->queue_sel].desc[1] = val;
1394 break;
1395 case VIRTIO_PCI_COMMON_Q_AVAILLO:
1396 proxy->vqs[vdev->queue_sel].avail[0] = val;
1397 break;
1398 case VIRTIO_PCI_COMMON_Q_AVAILHI:
1399 proxy->vqs[vdev->queue_sel].avail[1] = val;
1400 break;
1401 case VIRTIO_PCI_COMMON_Q_USEDLO:
1402 proxy->vqs[vdev->queue_sel].used[0] = val;
1403 break;
1404 case VIRTIO_PCI_COMMON_Q_USEDHI:
1405 proxy->vqs[vdev->queue_sel].used[1] = val;
1406 break;
1407 default:
1408 break;
1413 static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
1414 unsigned size)
1416 return 0;
1419 static void virtio_pci_notify_write(void *opaque, hwaddr addr,
1420 uint64_t val, unsigned size)
1422 VirtIODevice *vdev = opaque;
1423 unsigned queue = addr / QEMU_VIRTIO_PCI_QUEUE_MEM_MULT;
1425 if (queue < VIRTIO_QUEUE_MAX) {
1426 virtio_queue_notify(vdev, queue);
1430 static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
1431 uint64_t val, unsigned size)
1433 VirtIODevice *vdev = opaque;
1434 unsigned queue = val;
1436 if (queue < VIRTIO_QUEUE_MAX) {
1437 virtio_queue_notify(vdev, queue);
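/*
 * Editor's note -- explanatory summary, not part of the original file.
 * The notify region is effectively write-only (reads return 0). The MMIO
 * variant derives the queue index from the address (addr / 0x1000, matching
 * the per-queue slots described earlier), while the PIO variant takes the
 * queue index from the written value, which is why it fits in a tiny
 * 4-byte I/O region.
 */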
1441 static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
1442 unsigned size)
1444 VirtIOPCIProxy *proxy = opaque;
1445 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1446 uint64_t val = vdev->isr;
1448 vdev->isr = 0;
1449 pci_irq_deassert(&proxy->pci_dev);
1451 return val;
1454 static void virtio_pci_isr_write(void *opaque, hwaddr addr,
1455 uint64_t val, unsigned size)
1459 static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
1460 unsigned size)
1462 VirtIODevice *vdev = opaque;
1463 uint64_t val = 0;
1465 switch (size) {
1466 case 1:
1467 val = virtio_config_modern_readb(vdev, addr);
1468 break;
1469 case 2:
1470 val = virtio_config_modern_readw(vdev, addr);
1471 break;
1472 case 4:
1473 val = virtio_config_modern_readl(vdev, addr);
1474 break;
1476 return val;
1479 static void virtio_pci_device_write(void *opaque, hwaddr addr,
1480 uint64_t val, unsigned size)
1482 VirtIODevice *vdev = opaque;
1483 switch (size) {
1484 case 1:
1485 virtio_config_modern_writeb(vdev, addr, val);
1486 break;
1487 case 2:
1488 virtio_config_modern_writew(vdev, addr, val);
1489 break;
1490 case 4:
1491 virtio_config_modern_writel(vdev, addr, val);
1492 break;
1496 static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
1498 static const MemoryRegionOps common_ops = {
1499 .read = virtio_pci_common_read,
1500 .write = virtio_pci_common_write,
1501 .impl = {
1502 .min_access_size = 1,
1503 .max_access_size = 4,
1505 .endianness = DEVICE_LITTLE_ENDIAN,
1507 static const MemoryRegionOps isr_ops = {
1508 .read = virtio_pci_isr_read,
1509 .write = virtio_pci_isr_write,
1510 .impl = {
1511 .min_access_size = 1,
1512 .max_access_size = 4,
1514 .endianness = DEVICE_LITTLE_ENDIAN,
1516 static const MemoryRegionOps device_ops = {
1517 .read = virtio_pci_device_read,
1518 .write = virtio_pci_device_write,
1519 .impl = {
1520 .min_access_size = 1,
1521 .max_access_size = 4,
1523 .endianness = DEVICE_LITTLE_ENDIAN,
1525 static const MemoryRegionOps notify_ops = {
1526 .read = virtio_pci_notify_read,
1527 .write = virtio_pci_notify_write,
1528 .impl = {
1529 .min_access_size = 1,
1530 .max_access_size = 4,
1532 .endianness = DEVICE_LITTLE_ENDIAN,
1534 static const MemoryRegionOps notify_pio_ops = {
1535 .read = virtio_pci_notify_read,
1536 .write = virtio_pci_notify_write_pio,
1537 .impl = {
1538 .min_access_size = 1,
1539 .max_access_size = 4,
1541 .endianness = DEVICE_LITTLE_ENDIAN,
1545 memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
1546 &common_ops,
1547 proxy,
1548 "virtio-pci-common",
1549 proxy->common.size);
1551 memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
1552 &isr_ops,
1553 proxy,
1554 "virtio-pci-isr",
1555 proxy->isr.size);
1557 memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
1558 &device_ops,
1559 virtio_bus_get_device(&proxy->bus),
1560 "virtio-pci-device",
1561 proxy->device.size);
1563 memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
1564 &notify_ops,
1565 virtio_bus_get_device(&proxy->bus),
1566 "virtio-pci-notify",
1567 proxy->notify.size);
1569 memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
1570 &notify_pio_ops,
1571 virtio_bus_get_device(&proxy->bus),
1572 "virtio-pci-notify-pio",
1573 proxy->notify.size);
1576 static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
1577 VirtIOPCIRegion *region,
1578 struct virtio_pci_cap *cap,
1579 MemoryRegion *mr,
1580 uint8_t bar)
1582 memory_region_add_subregion(mr, region->offset, &region->mr);
1584 cap->cfg_type = region->type;
1585 cap->bar = bar;
1586 cap->offset = cpu_to_le32(region->offset);
1587 cap->length = cpu_to_le32(region->size);
1588 virtio_pci_add_mem_cap(proxy, cap);
1592 static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
1593 VirtIOPCIRegion *region,
1594 struct virtio_pci_cap *cap)
1596 virtio_pci_modern_region_map(proxy, region, cap,
1597 &proxy->modern_bar, proxy->modern_mem_bar);
1600 static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
1601 VirtIOPCIRegion *region,
1602 struct virtio_pci_cap *cap)
1604 virtio_pci_modern_region_map(proxy, region, cap,
1605 &proxy->io_bar, proxy->modern_io_bar);
1608 static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
1609 VirtIOPCIRegion *region)
1611 memory_region_del_subregion(&proxy->modern_bar,
1612 &region->mr);
1615 static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
1616 VirtIOPCIRegion *region)
1618 memory_region_del_subregion(&proxy->io_bar,
1619 &region->mr);
1622 /* This is called by virtio-bus just after the device is plugged. */
1623 static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
1625 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1626 VirtioBusState *bus = &proxy->bus;
1627 bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
1628 bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
1629 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
1630 uint8_t *config;
1631 uint32_t size;
1632 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
1634 config = proxy->pci_dev.config;
1635 if (proxy->class_code) {
1636 pci_config_set_class(config, proxy->class_code);
1639 if (legacy) {
1640 /* legacy and transitional */
1641 pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
1642 pci_get_word(config + PCI_VENDOR_ID));
1643 pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
1644 } else {
1645 /* pure virtio-1.0 */
1646 pci_set_word(config + PCI_VENDOR_ID,
1647 PCI_VENDOR_ID_REDHAT_QUMRANET);
1648 pci_set_word(config + PCI_DEVICE_ID,
1649 0x1040 + virtio_bus_get_vdev_id(bus));
1650 pci_config_set_revision(config, 1);
1652 config[PCI_INTERRUPT_PIN] = 1;
1655 if (modern) {
1656 struct virtio_pci_cap cap = {
1657 .cap_len = sizeof cap,
1659 struct virtio_pci_notify_cap notify = {
1660 .cap.cap_len = sizeof notify,
1661 .notify_off_multiplier =
1662 cpu_to_le32(QEMU_VIRTIO_PCI_QUEUE_MEM_MULT),
1664 struct virtio_pci_cfg_cap cfg = {
1665 .cap.cap_len = sizeof cfg,
1666 .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
1668 struct virtio_pci_notify_cap notify_pio = {
1669 .cap.cap_len = sizeof notify,
1670 .notify_off_multiplier = cpu_to_le32(0x0),
1673 struct virtio_pci_cfg_cap *cfg_mask;
1675 virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
1676 virtio_pci_modern_regions_init(proxy);
1678 virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
1679 virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
1680 virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
1681 virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);
1683 if (modern_pio) {
1684 memory_region_init(&proxy->io_bar, OBJECT(proxy),
1685 "virtio-pci-io", 0x4);
1687 pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar,
1688 PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);
1690 virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
1691 &notify_pio.cap);
1694 pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
1695 PCI_BASE_ADDRESS_SPACE_MEMORY |
1696 PCI_BASE_ADDRESS_MEM_PREFETCH |
1697 PCI_BASE_ADDRESS_MEM_TYPE_64,
1698 &proxy->modern_bar);
1700 proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
1701 cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
1702 pci_set_byte(&cfg_mask->cap.bar, ~0x0);
1703 pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
1704 pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
1705 pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
1708 if (proxy->nvectors) {
1709 int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
1710 proxy->msix_bar);
1711 if (err) {
1712 /* Notice when a system that supports MSIx can't initialize it. */
1713 if (err != -ENOTSUP) {
1714 error_report("unable to init msix vectors to %" PRIu32,
1715 proxy->nvectors);
1717 proxy->nvectors = 0;
1721 proxy->pci_dev.config_write = virtio_write_config;
1722 proxy->pci_dev.config_read = virtio_read_config;
1724 if (legacy) {
1725 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
1726 + virtio_bus_get_vdev_config_len(bus);
1727 size = pow2ceil(size);
1729 memory_region_init_io(&proxy->bar, OBJECT(proxy),
1730 &virtio_pci_config_ops,
1731 proxy, "virtio-pci", size);
1733 pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar,
1734 PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
1737 if (!kvm_has_many_ioeventfds()) {
1738 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
1741 virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
1744 static void virtio_pci_device_unplugged(DeviceState *d)
1746 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1747 bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
1748 bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
1750 virtio_pci_stop_ioeventfd(proxy);
1752 if (modern) {
1753 virtio_pci_modern_mem_region_unmap(proxy, &proxy->common);
1754 virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr);
1755 virtio_pci_modern_mem_region_unmap(proxy, &proxy->device);
1756 virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify);
1757 if (modern_pio) {
1758 virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio);
1763 static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
1765 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
1766 VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
1769 * virtio pci bar layout used by default.
1770 * subclasses can re-arrange things if needed.
1772 * region 0 -- virtio legacy io bar
1773 * region 1 -- msi-x bar
1774 * region 4+5 -- virtio modern memory (64bit) bar
1777 proxy->legacy_io_bar = 0;
1778 proxy->msix_bar = 1;
1779 proxy->modern_io_bar = 2;
1780 proxy->modern_mem_bar = 4;
1782 proxy->common.offset = 0x0;
1783 proxy->common.size = 0x1000;
1784 proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;
1786 proxy->isr.offset = 0x1000;
1787 proxy->isr.size = 0x1000;
1788 proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;
1790 proxy->device.offset = 0x2000;
1791 proxy->device.size = 0x1000;
1792 proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;
1794 proxy->notify.offset = 0x3000;
1795 proxy->notify.size =
1796 QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
1797 proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
1799 proxy->notify_pio.offset = 0x0;
1800 proxy->notify_pio.size = 0x4;
1801 proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
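/*
 * Editor's note -- explanatory summary, not part of the original file.
 * With the defaults above the modern memory BAR ends up laid out as:
 *
 *     0x0000  common config  (0x1000 bytes)
 *     0x1000  ISR            (0x1000 bytes)
 *     0x2000  device config  (0x1000 bytes)
 *     0x3000  notify         (0x1000 bytes per queue, VIRTIO_QUEUE_MAX queues)
 *
 * while the optional notify_pio region is a separate 4-byte I/O BAR.
 * Subclasses may rearrange this before realize completes.
 */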
1803 /* subclasses can enforce modern, so do this unconditionally */
1804 memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
1805 2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
1806 VIRTIO_QUEUE_MAX);
1808 memory_region_init_alias(&proxy->modern_cfg,
1809 OBJECT(proxy),
1810 "virtio-pci-cfg",
1811 &proxy->modern_bar,
1813 memory_region_size(&proxy->modern_bar));
1815 address_space_init(&proxy->modern_as, &proxy->modern_cfg, "virtio-pci-cfg-as");
1817 if (pci_is_express(pci_dev) && pci_bus_is_express(pci_dev->bus) &&
1818 !pci_bus_is_root(pci_dev->bus)) {
1819 int pos;
1821 pos = pcie_endpoint_cap_init(pci_dev, 0);
1822 assert(pos > 0);
1824 pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0, PCI_PM_SIZEOF);
1825 assert(pos > 0);
1828 * Indicates that this function complies with revision 1.2 of the
1829 * PCI Power Management Interface Specification.
1831 pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
1832 } else {
1834 * make future invocations of pci_is_express() return false
1835 * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
1837 pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
1840 virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
1841 if (k->realize) {
1842 k->realize(proxy, errp);
1846 static void virtio_pci_exit(PCIDevice *pci_dev)
1848 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
1850 msix_uninit_exclusive_bar(pci_dev);
1851 address_space_destroy(&proxy->modern_as);
1854 static void virtio_pci_reset(DeviceState *qdev)
1856 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
1857 VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
1858 int i;
1860 virtio_pci_stop_ioeventfd(proxy);
1861 virtio_bus_reset(bus);
1862 msix_unuse_all_vectors(&proxy->pci_dev);
1864 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1865 proxy->vqs[i].enabled = 0;
1869 static Property virtio_pci_properties[] = {
1870 DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
1871 VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
1872 DEFINE_PROP_BIT("disable-legacy", VirtIOPCIProxy, flags,
1873 VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
1874 DEFINE_PROP_BIT("disable-modern", VirtIOPCIProxy, flags,
1875 VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT, true),
1876 DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
1877 VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
1878 DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
1879 VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
1880 DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
1881 VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
1882 DEFINE_PROP_END_OF_LIST(),
1885 static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp)
1887 VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev);
1888 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
1889 PCIDevice *pci_dev = &proxy->pci_dev;
1891 if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) &&
1892 !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN)) {
1893 pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
1896 vpciklass->parent_dc_realize(qdev, errp);
1899 static void virtio_pci_class_init(ObjectClass *klass, void *data)
1901 DeviceClass *dc = DEVICE_CLASS(klass);
1902 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1903 VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);
1905 dc->props = virtio_pci_properties;
1906 k->realize = virtio_pci_realize;
1907 k->exit = virtio_pci_exit;
1908 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1909 k->revision = VIRTIO_PCI_ABI_VERSION;
1910 k->class_id = PCI_CLASS_OTHERS;
1911 vpciklass->parent_dc_realize = dc->realize;
1912 dc->realize = virtio_pci_dc_realize;
1913 dc->reset = virtio_pci_reset;
1916 static const TypeInfo virtio_pci_info = {
1917 .name = TYPE_VIRTIO_PCI,
1918 .parent = TYPE_PCI_DEVICE,
1919 .instance_size = sizeof(VirtIOPCIProxy),
1920 .class_init = virtio_pci_class_init,
1921 .class_size = sizeof(VirtioPCIClass),
1922 .abstract = true,
1925 /* virtio-blk-pci */
1927 static Property virtio_blk_pci_properties[] = {
1928 DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
1929 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1930 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1931 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1932 DEFINE_PROP_END_OF_LIST(),
1935 static void virtio_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
1937 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
1938 DeviceState *vdev = DEVICE(&dev->vdev);
1940 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1941 object_property_set_bool(OBJECT(vdev), true, "realized", errp);
1944 static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
1946 DeviceClass *dc = DEVICE_CLASS(klass);
1947 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1948 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1950 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1951 dc->props = virtio_blk_pci_properties;
1952 k->realize = virtio_blk_pci_realize;
1953 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1954 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
1955 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1956 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
1959 static void virtio_blk_pci_instance_init(Object *obj)
1961 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);
1963 virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
1964 TYPE_VIRTIO_BLK);
1965 object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev),"iothread",
1966 &error_abort);
1967 object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
1968 "bootindex", &error_abort);
1971 static const TypeInfo virtio_blk_pci_info = {
1972 .name = TYPE_VIRTIO_BLK_PCI,
1973 .parent = TYPE_VIRTIO_PCI,
1974 .instance_size = sizeof(VirtIOBlkPCI),
1975 .instance_init = virtio_blk_pci_instance_init,
1976 .class_init = virtio_blk_pci_class_init,
1979 /* virtio-scsi-pci */
1981 static Property virtio_scsi_pci_properties[] = {
1982 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1983 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1984 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
1985 DEV_NVECTORS_UNSPECIFIED),
1986 DEFINE_PROP_END_OF_LIST(),
static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    /*
     * For command line compatibility, this sets the virtio-scsi-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = virtio_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void virtio_scsi_pci_instance_init(Object *obj)
{
    VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SCSI);
    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
                              &error_abort);
}

static const TypeInfo virtio_scsi_pci_info = {
    .name = TYPE_VIRTIO_SCSI_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSCSIPCI),
    .instance_init = virtio_scsi_pci_instance_init,
    .class_init = virtio_scsi_pci_class_init,
};

/* vhost-scsi-pci */

#ifdef CONFIG_VHOST_SCSI
static Property vhost_scsi_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                       DEV_NVECTORS_UNSPECIFIED),
    DEFINE_PROP_END_OF_LIST(),
};
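
/*
 * As with virtio-scsi-pci, derive the vector count from the configured
 * queue count when "vectors" is not given on the command line.
 */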
static void vhost_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = vs->conf.num_queues + 3;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_scsi_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = vhost_scsi_pci_realize;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->props = vhost_scsi_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
}

static void vhost_scsi_pci_instance_init(Object *obj)
{
    VHostSCSIPCI *dev = VHOST_SCSI_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_SCSI);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo vhost_scsi_pci_info = {
    .name = TYPE_VHOST_SCSI_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VHostSCSIPCI),
    .instance_init = vhost_scsi_pci_instance_init,
    .class_init = vhost_scsi_pci_class_init,
};
#endif

/* virtio-balloon-pci */

static Property virtio_balloon_pci_properties[] = {
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};
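
/*
 * Only PCI_CLASS_OTHERS and the legacy PCI_CLASS_MEMORY_RAM value
 * (used by qemu < 1.1) are accepted for "class"; anything else is
 * coerced back to PCI_CLASS_OTHERS.
 */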
static void virtio_balloon_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
        vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
        vpci_dev->class_code = PCI_CLASS_OTHERS;
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = virtio_balloon_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = virtio_balloon_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_balloon_pci_instance_init(Object *obj)
{
    VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_BALLOON);
    object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
                              "guest-stats", &error_abort);
    object_property_add_alias(obj, "guest-stats-polling-interval",
                              OBJECT(&dev->vdev),
                              "guest-stats-polling-interval", &error_abort);
}

static const TypeInfo virtio_balloon_pci_info = {
    .name = TYPE_VIRTIO_BALLOON_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOBalloonPCI),
    .instance_init = virtio_balloon_pci_instance_init,
    .class_init = virtio_balloon_pci_class_init,
};

/* virtio-serial-pci */
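
/*
 * Realize handler for virtio-serial-pci: sanitise the user-supplied PCI
 * class code, default the vector count to one more than the maximum
 * number of virtserial ports, keep the historical bus name, then realize
 * the embedded virtio-serial device.
 */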
static void virtio_serial_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);
    DeviceState *proxy = DEVICE(vpci_dev);
    char *bus_name;

    if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
        vpci_dev->class_code != PCI_CLASS_OTHERS) {        /* qemu-kvm */
        vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
    }

    /* backwards-compatibility with machines that were created with
       DEV_NVECTORS_UNSPECIFIED */
    if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
        vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
    }

    /*
     * For command line compatibility, this sets the virtio-serial-device bus
     * name as before.
     */
    if (proxy->id) {
        bus_name = g_strdup_printf("%s.0", proxy->id);
        virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
        g_free(bus_name);
    }

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static Property virtio_serial_pci_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
    k->realize = virtio_serial_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
    dc->props = virtio_serial_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
}

static void virtio_serial_pci_instance_init(Object *obj)
{
    VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_SERIAL);
}

static const TypeInfo virtio_serial_pci_info = {
    .name = TYPE_VIRTIO_SERIAL_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOSerialPCI),
    .instance_init = virtio_serial_pci_instance_init,
    .class_init = virtio_serial_pci_class_init,
};

/* virtio-net-pci */

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_PROP_END_OF_LIST(),
};
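
/*
 * Propagate the proxy's qdev ID and type name to the virtio-net net
 * client, then plug the device onto the virtio bus and realize it.
 */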
static void virtio_net_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    DeviceState *qdev = DEVICE(vpci_dev);
    VirtIONetPCI *dev = VIRTIO_NET_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    virtio_net_set_netclient_name(&dev->vdev, qdev->id,
                                  object_get_typename(OBJECT(qdev)));
    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_net_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    k->romfile = "efi-virtio.rom";
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->props = virtio_net_properties;
    vpciklass->realize = virtio_net_pci_realize;
}

static void virtio_net_pci_instance_init(Object *obj)
{
    VirtIONetPCI *dev = VIRTIO_NET_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_NET);
    object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
                              "bootindex", &error_abort);
}

static const TypeInfo virtio_net_pci_info = {
    .name = TYPE_VIRTIO_NET_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIONetPCI),
    .instance_init = virtio_net_pci_instance_init,
    .class_init = virtio_net_pci_class_init,
};

/* virtio-rng-pci */

static void virtio_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIORngPCI *vrng = VIRTIO_RNG_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vrng->vdev);
    Error *err = NULL;

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    object_property_set_link(OBJECT(vrng),
                             OBJECT(vrng->vdev.conf.rng), "rng",
                             NULL);
}

static void virtio_rng_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = virtio_rng_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);

    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
    pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
    pcidev_k->class_id = PCI_CLASS_OTHERS;
}

static void virtio_rng_initfn(Object *obj)
{
    VirtIORngPCI *dev = VIRTIO_RNG_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_RNG);
    object_property_add_alias(obj, "rng", OBJECT(&dev->vdev), "rng",
                              &error_abort);
}

static const TypeInfo virtio_rng_pci_info = {
    .name = TYPE_VIRTIO_RNG_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIORngPCI),
    .instance_init = virtio_rng_initfn,
    .class_init = virtio_rng_pci_class_init,
};

/* virtio-input-pci */

static Property virtio_input_pci_properties[] = {
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&vinput->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    /* force virtio-1.0 */
    vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN;
    vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY;
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    dc->props = virtio_input_pci_properties;
    k->realize = virtio_input_pci_realize;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);

    pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
}

static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
}

static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
                                                  void *data)
{
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
}

static void virtio_keyboard_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_KEYBOARD);
}

static void virtio_mouse_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_MOUSE);
}

static void virtio_tablet_initfn(Object *obj)
{
    VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_TABLET);
}

static const TypeInfo virtio_input_pci_info = {
    .name = TYPE_VIRTIO_INPUT_PCI,
    .parent = TYPE_VIRTIO_PCI,
    .instance_size = sizeof(VirtIOInputPCI),
    .class_init = virtio_input_pci_class_init,
    .abstract = true,
};

static const TypeInfo virtio_input_hid_pci_info = {
    .name = TYPE_VIRTIO_INPUT_HID_PCI,
    .parent = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .abstract = true,
};

static const TypeInfo virtio_keyboard_pci_info = {
    .name = TYPE_VIRTIO_KEYBOARD_PCI,
    .parent = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init = virtio_input_hid_kbd_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_keyboard_initfn,
};

static const TypeInfo virtio_mouse_pci_info = {
    .name = TYPE_VIRTIO_MOUSE_PCI,
    .parent = TYPE_VIRTIO_INPUT_HID_PCI,
    .class_init = virtio_input_hid_mouse_pci_class_init,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_mouse_initfn,
};

static const TypeInfo virtio_tablet_pci_info = {
    .name = TYPE_VIRTIO_TABLET_PCI,
    .parent = TYPE_VIRTIO_INPUT_HID_PCI,
    .instance_size = sizeof(VirtIOInputHIDPCI),
    .instance_init = virtio_tablet_initfn,
};

#ifdef CONFIG_LINUX
static void virtio_host_initfn(Object *obj)
{
    VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_INPUT_HOST);
}

static const TypeInfo virtio_host_pci_info = {
    .name = TYPE_VIRTIO_INPUT_HOST_PCI,
    .parent = TYPE_VIRTIO_INPUT_PCI,
    .instance_size = sizeof(VirtIOInputHostPCI),
    .instance_init = virtio_host_initfn,
};
#endif

/* virtio-pci-bus */

static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}
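
/*
 * Wire the VirtioBusClass hooks to the PCI transport implementations;
 * only one virtio device can sit on each virtio-pci bus.
 */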
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_host_notifier = virtio_pci_set_host_notifier;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
}

static const TypeInfo virtio_pci_bus_info = {
    .name = TYPE_VIRTIO_PCI_BUS,
    .parent = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init = virtio_pci_bus_class_init,
};
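
/* Register every QOM type defined in this file at module-init time. */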
static void virtio_pci_register_types(void)
{
    type_register_static(&virtio_rng_pci_info);
    type_register_static(&virtio_input_pci_info);
    type_register_static(&virtio_input_hid_pci_info);
    type_register_static(&virtio_keyboard_pci_info);
    type_register_static(&virtio_mouse_pci_info);
    type_register_static(&virtio_tablet_pci_info);
#ifdef CONFIG_LINUX
    type_register_static(&virtio_host_pci_info);
#endif
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
#ifdef CONFIG_VIRTFS
    type_register_static(&virtio_9p_pci_info);
#endif
    type_register_static(&virtio_blk_pci_info);
    type_register_static(&virtio_scsi_pci_info);
    type_register_static(&virtio_balloon_pci_info);
    type_register_static(&virtio_serial_pci_info);
    type_register_static(&virtio_net_pci_info);
#ifdef CONFIG_VHOST_SCSI
    type_register_static(&vhost_scsi_pci_info);
#endif
}

type_init(virtio_pci_register_types)