/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <inttypes.h>

#include "virtio.h"
#include "virtio-blk.h"
#include "virtio-net.h"
#include "pci.h"
#include "qemu-error.h"
#include "msix.h"
#include "net.h"
#include "loader.h"
#include "kvm.h"
#include "blockdev.h"

/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR        20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR         22

/* Config space size */
#define VIRTIO_PCI_CONFIG_NOMSI         20
#define VIRTIO_PCI_CONFIG_MSI           24
#define VIRTIO_PCI_REGION_SIZE(dev)     (msix_present(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG(dev)          (msix_enabled(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION          0

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT     12

/* We can catch some guest bugs inside here so we continue supporting older
   guests. */
#define VIRTIO_PCI_BUG_BUS_MASTER       (1 << 0)

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

/* PCI bindings. */
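
/* Per-device state for a virtio device exposed over PCI: the PCIDevice,
 * the bound VirtIODevice, and the qdev properties shared by the
 * virtio-*-pci variants registered at the bottom of this file. */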
typedef struct {
    PCIDevice pci_dev;
    VirtIODevice *vdev;
    uint32_t bugs;
    uint32_t addr;
    uint32_t class_code;
    uint32_t nvectors;
    BlockConf block;
    NICConf nic;
    uint32_t host_features;
#ifdef CONFIG_LINUX
    V9fsConf fsconf;
#endif
    /* Max. number of ports we can have for a virtio-serial device */
    uint32_t max_virtserial_ports;
    virtio_net_conf net;
} VirtIOPCIProxy;

/* virtio device */
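
/* Raise an interrupt in the guest: through the per-vector MSI-X message when
 * MSI-X is enabled, otherwise by driving INTx from bit 0 of the ISR. */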
static void virtio_pci_notify(void *opaque, uint16_t vector)
{
    VirtIOPCIProxy *proxy = opaque;
    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else
        qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
}

static void virtio_pci_save_config(void * opaque, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, proxy->vdev->config_vector);
}

static void virtio_pci_save_queue(void * opaque, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
}

static int virtio_pci_load_config(void * opaque, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &proxy->vdev->config_vector);
    } else {
        proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
    }

    /* Try to find out if the guest has bus master disabled, but is
       in ready state. Then we have a buggy guest OS. */
    if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        proxy->bugs |= VIRTIO_PCI_BUG_BUS_MASTER;
    }
    return 0;
}

static int virtio_pci_load_queue(void * opaque, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(proxy->vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}

static void virtio_pci_reset(DeviceState *d)
{
    VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev);
    virtio_reset(proxy->vdev);
    msix_reset(&proxy->pci_dev);
    proxy->bugs = 0;
}
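
/* Handle guest writes to the legacy virtio header in the I/O BAR: feature
 * negotiation, queue PFN and selection, queue notification, device status,
 * and the MSI-X vector registers defined above. */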
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    target_phys_addr_t pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            if (vdev->bad_features)
                val = proxy->host_features & vdev->bad_features(vdev);
            else
                val = 0;
        }
        if (vdev->set_features)
            vdev->set_features(vdev, val);
        vdev->guest_features = val;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (target_phys_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        virtio_queue_notify(vdev, val);
        break;
    case VIRTIO_PCI_STATUS:
        virtio_set_status(vdev, val & 0xFF);
        if (vdev->status == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        /* Linux before 2.6.34 sets the device as OK without enabling
           the PCI device bus master bit. In this case we need to disable
           some safety checks. */
        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->bugs |= VIRTIO_PCI_BUG_BUS_MASTER;
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        error_report("%s: unexpected address 0x%x value 0x%x",
                     __func__, addr, val);
        break;
    }
}
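
/* Handle guest reads from the legacy virtio header.  Reading the ISR also
 * acknowledges it: the register is cleared and INTx is deasserted. */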
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = proxy->vdev;
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = proxy->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        qemu_set_irq(proxy->pci_dev.irq[0], 0);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}
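
/* The device-specific config space follows the virtio header in BAR 0; its
 * offset depends on whether MSI-X is currently enabled, so the accessors
 * below recompute VIRTIO_PCI_CONFIG() on every access and forward header
 * accesses to virtio_ioport_read/virtio_ioport_write. */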
static uint32_t virtio_pci_config_readb(void *opaque, uint32_t addr)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config)
        return virtio_ioport_read(proxy, addr);
    addr -= config;
    return virtio_config_readb(proxy->vdev, addr);
}

static uint32_t virtio_pci_config_readw(void *opaque, uint32_t addr)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config)
        return virtio_ioport_read(proxy, addr);
    addr -= config;
    return virtio_config_readw(proxy->vdev, addr);
}

static uint32_t virtio_pci_config_readl(void *opaque, uint32_t addr)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config)
        return virtio_ioport_read(proxy, addr);
    addr -= config;
    return virtio_config_readl(proxy->vdev, addr);
}

static void virtio_pci_config_writeb(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    virtio_config_writeb(proxy->vdev, addr, val);
}

static void virtio_pci_config_writew(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    virtio_config_writew(proxy->vdev, addr, val);
}

static void virtio_pci_config_writel(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    virtio_config_writel(proxy->vdev, addr, val);
}
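
/* BAR 0 mapping callback: register the I/O port handlers for the virtio
 * header plus device config, and refresh the config cache from the device. */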
static void virtio_map(PCIDevice *pci_dev, int region_num,
                       pcibus_t addr, pcibus_t size, int type)
{
    VirtIOPCIProxy *proxy = container_of(pci_dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    unsigned config_len = VIRTIO_PCI_REGION_SIZE(pci_dev) + vdev->config_len;

    proxy->addr = addr;

    register_ioport_write(addr, config_len, 1, virtio_pci_config_writeb, proxy);
    register_ioport_write(addr, config_len, 2, virtio_pci_config_writew, proxy);
    register_ioport_write(addr, config_len, 4, virtio_pci_config_writel, proxy);
    register_ioport_read(addr, config_len, 1, virtio_pci_config_readb, proxy);
    register_ioport_read(addr, config_len, 2, virtio_pci_config_readw, proxy);
    register_ioport_read(addr, config_len, 4, virtio_pci_config_readl, proxy);

    if (vdev->config_len)
        vdev->get_config(vdev, vdev->config);
}
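
/* Intercept PCI config space writes: clearing bus mastering drops DRIVER_OK
 * from the device status (unless we are working around a known buggy guest),
 * then the write is forwarded to the default PCI and MSI-X handlers. */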
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    if (PCI_COMMAND == address) {
        if (!(val & PCI_COMMAND_MASTER)) {
            if (!(proxy->bugs & VIRTIO_PCI_BUG_BUS_MASTER)) {
                virtio_set_status(proxy->vdev,
                                  proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
            }
        }
    }

    pci_default_write_config(pci_dev, address, val, len);
    msix_write_config(pci_dev, address, val, len);
}

static unsigned virtio_pci_get_features(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;
    return proxy->host_features;
}

static void virtio_pci_guest_notifier_read(void *opaque)
{
    VirtQueue *vq = opaque;
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}
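
/* MSI-X mask/unmask handling for one virtqueue: with KVM irqfd support an
 * unmasked vector is injected directly from the guest notifier, while a
 * masked vector falls back to being polled and delivered from userspace. */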
static int virtio_pci_mask_vq(PCIDevice *dev, unsigned vector,
                              VirtQueue *vq, int masked)
{
#ifdef CONFIG_KVM
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
    int r = kvm_set_irqfd(dev->msix_irq_entries[vector].gsi,
                          event_notifier_get_fd(notifier),
                          !masked);
    if (r < 0) {
        return (r == -ENOSYS) ? 0 : r;
    }
    if (masked) {
        qemu_set_fd_handler(event_notifier_get_fd(notifier),
                            virtio_pci_guest_notifier_read, NULL, vq);
    } else {
        qemu_set_fd_handler(event_notifier_get_fd(notifier),
                            NULL, NULL, NULL);
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

static int virtio_pci_mask_notifier(PCIDevice *dev, unsigned vector,
                                    int masked)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    int r, n;

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }
        if (virtio_queue_vector(vdev, n) != vector) {
            continue;
        }
        r = virtio_pci_mask_vq(dev, vector, virtio_get_queue(vdev, n), masked);
        if (r < 0) {
            goto undo;
        }
    }
    return 0;

undo:
    while (--n >= 0) {
        virtio_pci_mask_vq(dev, vector, virtio_get_queue(vdev, n), !masked);
    }
    return r;
}
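
/* Guest notifier (eventfd) setup for one virtqueue: while assigned, queue
 * interrupts are signalled through an EventNotifier that we poll and turn
 * into virtio_irq() calls. */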
static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        qemu_set_fd_handler(event_notifier_get_fd(notifier),
                            virtio_pci_guest_notifier_read, NULL, vq);
    } else {
        qemu_set_fd_handler(event_notifier_get_fd(notifier),
                            NULL, NULL, NULL);
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_pci_guest_notifier_read(vq);
        event_notifier_cleanup(notifier);
    }

    return 0;
}

static int virtio_pci_set_guest_notifiers(void *opaque, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    int r, n;

    /* Must unset mask notifier while guest notifier
     * is still assigned */
    if (!assign) {
        r = msix_unset_mask_notifier(&proxy->pci_dev);
        assert(r >= 0);
    }

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(opaque, n, assign);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set mask notifier after guest notifier
     * has been assigned */
    if (assign) {
        r = msix_set_mask_notifier(&proxy->pci_dev,
                                   virtio_pci_mask_notifier);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(opaque, n, !assign);
    }
    return r;
}
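
/* Host notifier setup: bind a KVM ioeventfd to this queue's QUEUE_NOTIFY
 * port so that a guest kick is signalled through the eventfd instead of
 * being handled in the normal I/O port emulation path. */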
static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r;
    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            return r;
        }
        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
                                       n, assign);
        if (r < 0) {
            event_notifier_cleanup(notifier);
        }
    } else {
        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
                                       n, assign);
        if (r < 0) {
            return r;
        }
        event_notifier_cleanup(notifier);
    }
    return r;
}

static const VirtIOBindings virtio_pci_bindings = {
    .notify = virtio_pci_notify,
    .save_config = virtio_pci_save_config,
    .load_config = virtio_pci_load_config,
    .save_queue = virtio_pci_save_queue,
    .load_queue = virtio_pci_load_queue,
    .get_features = virtio_pci_get_features,
    .set_host_notifier = virtio_pci_set_host_notifier,
    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
};
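
/* Common PCI setup shared by all virtio devices: vendor/device and subsystem
 * IDs, revision (the virtio ABI version), an optional MSI-X BAR 1, the legacy
 * I/O BAR 0, and binding the transport callbacks to the VirtIODevice. */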
static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev,
                            uint16_t vendor, uint16_t device,
                            uint16_t class_code, uint8_t pif)
{
    uint8_t *config;
    uint32_t size;

    proxy->vdev = vdev;

    config = proxy->pci_dev.config;
    pci_config_set_vendor_id(config, vendor);
    pci_config_set_device_id(config, device);

    config[0x08] = VIRTIO_PCI_ABI_VERSION; /* revision ID = virtio ABI version */

    config[0x09] = pif;                    /* programming interface */
    pci_config_set_class(config, class_code);

    /* subsystem vendor/device IDs carry the virtio device type */
    config[0x2c] = vendor & 0xFF;
    config[0x2d] = (vendor >> 8) & 0xFF;
    config[0x2e] = vdev->device_id & 0xFF;
    config[0x2f] = (vdev->device_id >> 8) & 0xFF;

    config[0x3d] = 1;                      /* interrupt pin A */

    if (vdev->nvectors && !msix_init(&proxy->pci_dev, vdev->nvectors, 1, 0)) {
        pci_register_bar(&proxy->pci_dev, 1,
                         msix_bar_size(&proxy->pci_dev),
                         PCI_BASE_ADDRESS_SPACE_MEMORY,
                         msix_mmio_map);
    } else
        vdev->nvectors = 0;

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
    if (size & (size-1))
        size = 1 << qemu_fls(size);

    pci_register_bar(&proxy->pci_dev, 0, size, PCI_BASE_ADDRESS_SPACE_IO,
                     virtio_map);

    virtio_bind_device(vdev, &virtio_pci_bindings, proxy);
    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = vdev->get_features(vdev, proxy->host_features);
}

static int virtio_blk_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_STORAGE_SCSI &&
        proxy->class_code != PCI_CLASS_STORAGE_OTHER)
        proxy->class_code = PCI_CLASS_STORAGE_SCSI;

    vdev = virtio_blk_init(&pci_dev->qdev, &proxy->block);
    if (!vdev) {
        return -1;
    }
    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_BLOCK,
                    proxy->class_code, 0x00);
    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static int virtio_exit_pci(PCIDevice *pci_dev)
{
    return msix_uninit(pci_dev);
}

static int virtio_blk_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_blk_exit(proxy->vdev);
    blockdev_mark_auto_del(proxy->block.bs);
    return virtio_exit_pci(pci_dev);
}

static int virtio_serial_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        proxy->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
        proxy->class_code != PCI_CLASS_OTHERS)          /* qemu-kvm  */
        proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER;

    vdev = virtio_serial_init(&pci_dev->qdev, proxy->max_virtserial_ports);
    if (!vdev) {
        return -1;
    }
    vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
                                        ? proxy->max_virtserial_ports + 1
                                        : proxy->nvectors;
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_CONSOLE,
                    proxy->class_code, 0x00);
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static int virtio_serial_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_serial_exit(proxy->vdev);
    return virtio_exit_pci(pci_dev);
}

static int virtio_net_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net);

    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_NET,
                    PCI_CLASS_NETWORK_ETHERNET,
                    0x00);

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static int virtio_net_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_net_exit(proxy->vdev);
    return virtio_exit_pci(pci_dev);
}

static int virtio_balloon_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_balloon_init(&pci_dev->qdev);
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_BALLOON,
                    PCI_CLASS_MEMORY_RAM,
                    0x00);
    return 0;
}

#ifdef CONFIG_VIRTFS
static int virtio_9p_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf);
    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    0x1009,
                    0x2,
                    0x00);
    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}
#endif
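
/* qdev registration table: one PCIDeviceInfo entry per virtio-*-pci device,
 * with its init/exit callbacks and user-visible properties. */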
static PCIDeviceInfo virtio_info[] = {
    {
        .qdev.name = "virtio-blk-pci",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init      = virtio_blk_init_pci,
        .exit      = virtio_blk_exit_pci,
        .qdev.props = (Property[]) {
            DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
            DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, block),
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
            DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_END_OF_LIST(),
        },
        .qdev.reset = virtio_pci_reset,
    },{
        .qdev.name  = "virtio-net-pci",
        .qdev.size  = sizeof(VirtIOPCIProxy),
        .init       = virtio_net_init_pci,
        .exit       = virtio_net_exit_pci,
        .romfile    = "pxe-virtio.bin",
        .qdev.props = (Property[]) {
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
            DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
            DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy,
                               net.txtimer, TX_TIMER_INTERVAL),
            DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy,
                              net.txburst, TX_BURST),
            DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
            DEFINE_PROP_END_OF_LIST(),
        },
        .qdev.reset = virtio_pci_reset,
    },{
        .qdev.name = "virtio-serial-pci",
        .qdev.alias = "virtio-serial",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init      = virtio_serial_init_pci,
        .exit      = virtio_serial_exit_pci,
        .qdev.props = (Property[]) {
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                               DEV_NVECTORS_UNSPECIFIED),
            DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
            DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy, max_virtserial_ports,
                               31),
            DEFINE_PROP_END_OF_LIST(),
        },
        .qdev.reset = virtio_pci_reset,
    },{
        .qdev.name = "virtio-balloon-pci",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init      = virtio_balloon_init_pci,
        .exit      = virtio_exit_pci,
        .qdev.props = (Property[]) {
            DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_END_OF_LIST(),
        },
        .qdev.reset = virtio_pci_reset,
    },{
#ifdef CONFIG_VIRTFS
        .qdev.name = "virtio-9p-pci",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init      = virtio_9p_init_pci,
        .qdev.props = (Property[]) {
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
            DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy, fsconf.tag),
            DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy, fsconf.fsdev_id),
            DEFINE_PROP_END_OF_LIST(),
        },
    }, {
#endif
        /* end of list */
    }
};

static void virtio_pci_register_devices(void)
{
    pci_qdev_register_many(virtio_info);
}

device_init(virtio_pci_register_devices)