/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <inttypes.h>

#include "virtio.h"
#include "virtio-blk.h"
#include "virtio-net.h"
#include "pci.h"
#include "qemu-error.h"
#include "msix.h"
#include "net.h"
#include "loader.h"
#include "kvm.h"
#include "blockdev.h"

/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR        20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR         22

/* Config space size */
#define VIRTIO_PCI_CONFIG_NOMSI         20
#define VIRTIO_PCI_CONFIG_MSI           24
#define VIRTIO_PCI_REGION_SIZE(dev)     (msix_present(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG(dev)          (msix_enabled(dev) ? \
                                         VIRTIO_PCI_CONFIG_MSI : \
                                         VIRTIO_PCI_CONFIG_NOMSI)

/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION          0

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT     12

/* We can catch some guest bugs inside here so we continue supporting older
   guests. */
#define VIRTIO_PCI_BUG_BUS_MASTER       (1 << 0)

/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

/* PCI bindings. */

typedef struct {
    PCIDevice pci_dev;
    VirtIODevice *vdev;
    uint32_t bugs;
    uint32_t addr;
    uint32_t class_code;
    uint32_t nvectors;
    BlockConf block;
    NICConf nic;
    uint32_t host_features;
#ifdef CONFIG_LINUX
    V9fsConf fsconf;
#endif
    /* Max. number of ports we can have for a virtio-serial device */
    uint32_t max_virtserial_ports;
    virtio_net_conf net;
} VirtIOPCIProxy;

/* virtio device */

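/*
 * Interrupt delivery: when MSI-X is enabled, signal the vector configured
 * for the event; otherwise drive the legacy INTx line from bit 0 of the
 * device ISR.
 */
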
static void virtio_pci_notify(void *opaque, uint16_t vector)
{
    VirtIOPCIProxy *proxy = opaque;
    if (msix_enabled(&proxy->pci_dev))
        msix_notify(&proxy->pci_dev, vector);
    else
        qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
}

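/*
 * Migration hooks: the PCI and MSI-X state, plus the configuration and
 * per-queue MSI-X vectors, are saved and restored alongside the generic
 * virtio state.
 */
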
static void virtio_pci_save_config(void * opaque, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    pci_device_save(&proxy->pci_dev, f);
    msix_save(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, proxy->vdev->config_vector);
}

static void virtio_pci_save_queue(void * opaque, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    if (msix_present(&proxy->pci_dev))
        qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
}

static int virtio_pci_load_config(void * opaque, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &proxy->vdev->config_vector);
    } else {
        proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
    }

    /* Try to find out if the guest has bus master disabled, but is
       in ready state. Then we have a buggy guest OS. */
    if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        proxy->bugs |= VIRTIO_PCI_BUG_BUS_MASTER;
    }
    return 0;
}

static int virtio_pci_load_queue(void * opaque, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = opaque;
    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(proxy->vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }
    return 0;
}

static void virtio_pci_reset(DeviceState *d)
{
    VirtIOPCIProxy *proxy = container_of(d, VirtIOPCIProxy, pci_dev.qdev);
    virtio_reset(proxy->vdev);
    msix_reset(&proxy->pci_dev);
    proxy->bugs = 0;
}

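/*
 * Guest writes to the legacy I/O register block (BAR 0).  The offsets are
 * the VIRTIO_PCI_* constants above; anything past the common header is
 * handled by the device-specific config accessors further down.
 */
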
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    target_phys_addr_t pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly?  We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            if (vdev->bad_features)
                val = proxy->host_features & vdev->bad_features(vdev);
            else
                val = 0;
        }
        if (vdev->set_features)
            vdev->set_features(vdev, val);
        vdev->guest_features = val;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (target_phys_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (pa == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        virtio_queue_notify(vdev, val);
        break;
    case VIRTIO_PCI_STATUS:
        virtio_set_status(vdev, val & 0xFF);
        if (vdev->status == 0) {
            virtio_reset(proxy->vdev);
            msix_unuse_all_vectors(&proxy->pci_dev);
        }

        /* Linux before 2.6.34 sets the device as OK without enabling
           the PCI device bus master bit. In this case we need to disable
           some safety checks. */
        if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            proxy->bugs |= VIRTIO_PCI_BUG_BUS_MASTER;
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for the guest to discover that an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        fprintf(stderr, "%s: unexpected address 0x%x value 0x%x\n",
                __func__, addr, val);
        break;
    }
}

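/*
 * Guest reads from the legacy I/O register block.  Note that reading
 * VIRTIO_PCI_ISR is destructive: it returns the current ISR, clears it,
 * and lowers the INTx line.
 */
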
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = proxy->vdev;
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = proxy->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        qemu_set_irq(proxy->pci_dev.irq[0], 0);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}

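/*
 * BAR 0 is split into the common virtio header (VIRTIO_PCI_CONFIG bytes,
 * larger when MSI-X is enabled) followed by the device-specific
 * configuration space, so each accessor dispatches on the offset.
 */
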
static uint32_t virtio_pci_config_readb(void *opaque, uint32_t addr)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config)
        return virtio_ioport_read(proxy, addr);
    addr -= config;
    return virtio_config_readb(proxy->vdev, addr);
}

static uint32_t virtio_pci_config_readw(void *opaque, uint32_t addr)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config)
        return virtio_ioport_read(proxy, addr);
    addr -= config;
    return virtio_config_readw(proxy->vdev, addr);
}

static uint32_t virtio_pci_config_readl(void *opaque, uint32_t addr)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config)
        return virtio_ioport_read(proxy, addr);
    addr -= config;
    return virtio_config_readl(proxy->vdev, addr);
}

static void virtio_pci_config_writeb(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    virtio_config_writeb(proxy->vdev, addr, val);
}

static void virtio_pci_config_writew(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    virtio_config_writew(proxy->vdev, addr, val);
}

static void virtio_pci_config_writel(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
    addr -= proxy->addr;
    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    virtio_config_writel(proxy->vdev, addr, val);
}

static void virtio_map(PCIDevice *pci_dev, int region_num,
                       pcibus_t addr, pcibus_t size, int type)
{
    VirtIOPCIProxy *proxy = container_of(pci_dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = proxy->vdev;
    unsigned config_len = VIRTIO_PCI_REGION_SIZE(pci_dev) + vdev->config_len;

    proxy->addr = addr;

    register_ioport_write(addr, config_len, 1, virtio_pci_config_writeb, proxy);
    register_ioport_write(addr, config_len, 2, virtio_pci_config_writew, proxy);
    register_ioport_write(addr, config_len, 4, virtio_pci_config_writel, proxy);
    register_ioport_read(addr, config_len, 1, virtio_pci_config_readb, proxy);
    register_ioport_read(addr, config_len, 2, virtio_pci_config_readw, proxy);
    register_ioport_read(addr, config_len, 4, virtio_pci_config_readl, proxy);

    if (vdev->config_len)
        vdev->get_config(vdev, vdev->config);
}

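/*
 * PCI config space writes: clearing bus mastering normally forces the
 * device out of DRIVER_OK, except for guests already flagged with the
 * bus-master bug workaround.
 */
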
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    if (PCI_COMMAND == address) {
        if (!(val & PCI_COMMAND_MASTER)) {
            if (!(proxy->bugs & VIRTIO_PCI_BUG_BUS_MASTER)) {
                virtio_set_status(proxy->vdev,
                                  proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
            }
        }
    }

    pci_default_write_config(pci_dev, address, val, len);
    msix_write_config(pci_dev, address, val, len);
}

static unsigned virtio_pci_get_features(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;
    return proxy->host_features;
}

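/*
 * Guest notifiers: an eventfd per virtqueue whose signalling is translated
 * into a guest interrupt.  This lets another thread or backend (vhost, for
 * example) inject interrupts without calling into the virtio core directly;
 * the vhost use is an assumption here, not something this file spells out.
 */
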
static void virtio_pci_guest_notifier_read(void *opaque)
{
    VirtQueue *vq = opaque;
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        qemu_set_fd_handler(event_notifier_get_fd(notifier),
                            virtio_pci_guest_notifier_read, NULL, vq);
    } else {
        qemu_set_fd_handler(event_notifier_get_fd(notifier),
                            NULL, NULL, NULL);
        event_notifier_cleanup(notifier);
    }

    return 0;
}

static int virtio_pci_set_guest_notifiers(void *opaque, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = proxy->vdev;
    int r, n;

    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(opaque, n, assign);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(opaque, n, !assign);
    }
    return r;
}

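/*
 * Host notifiers: bind the VIRTIO_PCI_QUEUE_NOTIFY port for queue n to an
 * eventfd via KVM's ioeventfd mechanism, so a guest "kick" can be picked up
 * from the eventfd instead of going through the normal I/O emulation path.
 */
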
static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
    EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
    int r;
    if (assign) {
        r = event_notifier_init(notifier, 1);
        if (r < 0) {
            return r;
        }
        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
                                       n, assign);
        if (r < 0) {
            event_notifier_cleanup(notifier);
        }
    } else {
        r = kvm_set_ioeventfd_pio_word(event_notifier_get_fd(notifier),
                                       proxy->addr + VIRTIO_PCI_QUEUE_NOTIFY,
                                       n, assign);
        if (r < 0) {
            return r;
        }
        event_notifier_cleanup(notifier);
    }
    return r;
}

static const VirtIOBindings virtio_pci_bindings = {
    .notify = virtio_pci_notify,
    .save_config = virtio_pci_save_config,
    .load_config = virtio_pci_load_config,
    .save_queue = virtio_pci_save_queue,
    .load_queue = virtio_pci_load_queue,
    .get_features = virtio_pci_get_features,
    .set_host_notifier = virtio_pci_set_host_notifier,
    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
};

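/*
 * Common PCI plumbing shared by all virtio devices: PCI IDs and class,
 * an optional MSI-X BAR (BAR 1), an I/O BAR (BAR 0) rounded up to a power
 * of two, and the host feature bits advertised to the guest.
 */
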
static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev,
                            uint16_t vendor, uint16_t device,
                            uint16_t class_code, uint8_t pif)
{
    uint8_t *config;
    uint32_t size;

    proxy->vdev = vdev;

    config = proxy->pci_dev.config;
    pci_config_set_vendor_id(config, vendor);
    pci_config_set_device_id(config, device);

    config[0x08] = VIRTIO_PCI_ABI_VERSION;  /* revision ID */

    config[0x09] = pif;                     /* programming interface */
    pci_config_set_class(config, class_code);

    config[0x2c] = vendor & 0xFF;           /* subsystem vendor ID */
    config[0x2d] = (vendor >> 8) & 0xFF;
    config[0x2e] = vdev->device_id & 0xFF;  /* subsystem device ID */
    config[0x2f] = (vdev->device_id >> 8) & 0xFF;

    config[0x3d] = 1;                       /* interrupt pin: INTA */

    if (vdev->nvectors && !msix_init(&proxy->pci_dev, vdev->nvectors, 1, 0)) {
        pci_register_bar(&proxy->pci_dev, 1,
                         msix_bar_size(&proxy->pci_dev),
                         PCI_BASE_ADDRESS_SPACE_MEMORY,
                         msix_mmio_map);
    } else
        vdev->nvectors = 0;

    proxy->pci_dev.config_write = virtio_write_config;

    size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
    if (size & (size - 1))
        size = 1 << qemu_fls(size);

    pci_register_bar(&proxy->pci_dev, 0, size, PCI_BASE_ADDRESS_SPACE_IO,
                     virtio_map);

    virtio_bind_device(vdev, &virtio_pci_bindings, proxy);
    proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
    proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
    proxy->host_features = vdev->get_features(vdev, proxy->host_features);
}

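/*
 * qdev init/exit callbacks for the individual virtio device flavours
 * (block, serial, net, balloon, 9p).  Each one instantiates the backend
 * device model and then hands it to virtio_init_pci().
 */
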
static int virtio_blk_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_STORAGE_SCSI &&
        proxy->class_code != PCI_CLASS_STORAGE_OTHER)
        proxy->class_code = PCI_CLASS_STORAGE_SCSI;

    vdev = virtio_blk_init(&pci_dev->qdev, &proxy->block);
    if (!vdev) {
        return -1;
    }
    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_BLOCK,
                    proxy->class_code, 0x00);
    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static int virtio_exit_pci(PCIDevice *pci_dev)
{
    return msix_uninit(pci_dev);
}

static int virtio_blk_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_blk_exit(proxy->vdev);
    blockdev_mark_auto_del(proxy->block.bs);
    return virtio_exit_pci(pci_dev);
}

static int virtio_serial_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
        proxy->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
        proxy->class_code != PCI_CLASS_OTHERS)          /* qemu-kvm  */
        proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER;

    vdev = virtio_serial_init(&pci_dev->qdev, proxy->max_virtserial_ports);
    if (!vdev) {
        return -1;
    }
    vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
                                        ? proxy->max_virtserial_ports + 1
                                        : proxy->nvectors;
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_CONSOLE,
                    proxy->class_code, 0x00);
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static int virtio_serial_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_serial_exit(proxy->vdev);
    return virtio_exit_pci(pci_dev);
}

static int virtio_net_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net);

    vdev->nvectors = proxy->nvectors;
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_NET,
                    PCI_CLASS_NETWORK_ETHERNET,
                    0x00);

    /* make the actual value visible */
    proxy->nvectors = vdev->nvectors;
    return 0;
}

static int virtio_net_exit_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);

    virtio_net_exit(proxy->vdev);
    return virtio_exit_pci(pci_dev);
}

static int virtio_balloon_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_balloon_init(&pci_dev->qdev);
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    PCI_DEVICE_ID_VIRTIO_BALLOON,
                    PCI_CLASS_MEMORY_RAM,
                    0x00);
    return 0;
}

#ifdef CONFIG_VIRTFS
static int virtio_9p_init_pci(PCIDevice *pci_dev)
{
    VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
    VirtIODevice *vdev;

    vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf);
    virtio_init_pci(proxy, vdev,
                    PCI_VENDOR_ID_REDHAT_QUMRANET,
                    0x1009,
                    0x2,
                    0x00);

    return 0;
}
#endif

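/*
 * qdev registration table: one entry per virtio PCI device, each with the
 * user-visible properties it accepts (the exact property names come from
 * the DEFINE_* macros and DEFINE_PROP_* entries below).
 */
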
static PCIDeviceInfo virtio_info[] = {
    {
        .qdev.name = "virtio-blk-pci",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init      = virtio_blk_init_pci,
        .exit      = virtio_blk_exit_pci,
        .qdev.props = (Property[]) {
            DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
            DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, block),
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
            DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_END_OF_LIST(),
        },
        .qdev.reset = virtio_pci_reset,
    },{
        .qdev.name  = "virtio-net-pci",
        .qdev.size  = sizeof(VirtIOPCIProxy),
        .init       = virtio_net_init_pci,
        .exit       = virtio_net_exit_pci,
        .romfile    = "pxe-virtio.bin",
        .qdev.props = (Property[]) {
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
            DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
            DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy,
                               net.txtimer, TX_TIMER_INTERVAL),
            DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy,
                              net.txburst, TX_BURST),
            DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
            DEFINE_PROP_END_OF_LIST(),
        },
        .qdev.reset = virtio_pci_reset,
    },{
        .qdev.name  = "virtio-serial-pci",
        .qdev.alias = "virtio-serial",
        .qdev.size  = sizeof(VirtIOPCIProxy),
        .init       = virtio_serial_init_pci,
        .exit       = virtio_serial_exit_pci,
        .qdev.props = (Property[]) {
            DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
                               DEV_NVECTORS_UNSPECIFIED),
            DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
            DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy, max_virtserial_ports,
                               31),
            DEFINE_PROP_END_OF_LIST(),
        },
        .qdev.reset = virtio_pci_reset,
    },{
        .qdev.name = "virtio-balloon-pci",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init      = virtio_balloon_init_pci,
        .exit      = virtio_exit_pci,
        .qdev.props = (Property[]) {
            DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_END_OF_LIST(),
        },
        .qdev.reset = virtio_pci_reset,
    },{
#ifdef CONFIG_VIRTFS
        .qdev.name = "virtio-9p-pci",
        .qdev.size = sizeof(VirtIOPCIProxy),
        .init      = virtio_9p_init_pci,
        .qdev.props = (Property[]) {
            DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
            DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy, fsconf.tag),
            DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy, fsconf.fsdev_id),
            DEFINE_PROP_END_OF_LIST(),
        },
    }, {
#endif
        /* end of list */
    }
};

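/*
 * The table above is registered with qdev below.  As a rough illustration
 * (an assumption about the surrounding QEMU command-line machinery, not
 * something defined in this file), a device from this table would typically
 * be instantiated with something like:
 *
 *   -drive file=disk.img,if=none,id=drive0
 *   -device virtio-blk-pci,drive=drive0,vectors=2
 *
 * where "drive" comes from DEFINE_BLOCK_PROPERTIES and "vectors" from the
 * property list above.
 */
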
static void virtio_pci_register_devices(void)
{
    pci_qdev_register_many(virtio_info);
}

device_init(virtio_pci_register_devices)