/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <inttypes.h>

#include "virtio.h"
#include "sysemu.h"
/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES        0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES       4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN            8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM            12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL            14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY         16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS               18

/* An 8-bit r/o interrupt status register.  Reading the value will return the
 * current contents of the ISR and will also clear it.  This is effectively
 * a read-and-acknowledge. */
#define VIRTIO_PCI_ISR                  19

#define VIRTIO_PCI_CONFIG               20

/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION          0

/* How many bits to shift physical queue address written to QUEUE_PFN.
 * 12 is historical, and due to x86 page size. */
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT     12

/* The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. */
#define VIRTIO_PCI_VRING_ALIGN          4096
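
/* Editor's summary of the legacy virtio-pci I/O register layout defined
 * above (byte offsets from the start of the device's I/O BAR; the
 * device-specific config space follows at offset 20):
 *
 *   offset  size  access  register
 *   ------  ----  ------  ------------------------------
 *        0     4   r/o    host feature bits
 *        4     4   r/w    guest (acked) feature bits
 *        8     4   r/w    queue PFN (ring address >> 12)
 *       12     2   r/o    queue size for selected queue
 *       14     2   r/w    queue selector
 *       16     2   r/w    queue notify ("kick")
 *       18     1   r/w    device status
 *       19     1   r/o    ISR (read clears)
 */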
/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;
struct VirtQueue
{
    VRing vring;
    uint32_t pfn;
    uint16_t last_avail_idx;
    int inuse;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
};

#define VIRTIO_PCI_QUEUE_MAX 16
/* virt queue functions */
static void virtqueue_init(VirtQueue *vq, target_phys_addr_t pa)
{
    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}
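
/* Worked example (editor's note, not in the original file): for a queue of
 * num = 256 entries, the guest-physical layout computed above is
 *
 *     desc  = pa          (256 * sizeof(VRingDesc) = 256 * 16 = 4096 bytes)
 *     avail = pa + 4096   (4-byte header + 256 * 2 bytes = 516 bytes)
 *     used  = pa + 8192   (pa + 4096 + 516 rounded up to the 4096-byte
 *                          VIRTIO_PCI_VRING_ALIGN boundary)
 */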
static inline uint64_t vring_desc_addr(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}
static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}
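
/* Editor's note: the accessors above go through ldX_phys()/stX_phys() on
 * every call, so ring state always lives in guest memory and nothing is
 * cached on the QEMU side; each access sees (or publishes) the guest's
 * current view of the ring. */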
void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++)
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
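
/* The fill/flush split exists so a device can complete several requests and
 * publish them with a single used-index update.  A minimal sketch (editor's
 * example; 'reqs', 'n' and 'req_len()' are hypothetical):
 *
 *     for (i = 0; i < n; i++)
 *         virtqueue_fill(vq, &reqs[i], req_len(&reqs[i]), i);
 *     virtqueue_flush(vq, n);            // one index bump for n buffers
 *     virtio_notify(vdev, vq);           // at most one interrupt
 *
 * virtqueue_push() is the common single-buffer case: fill at relative index
 * 0, then flush a count of 1. */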
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved used index from %u to %u",
                idx, vring_avail_idx(vq));
        exit(1);
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available", head);
        exit(1);
    }

    return head;
}
static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(vq, i) & VRING_DESC_F_NEXT))
        return vq->vring.num;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(vq, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= vq->vring.num) {
        fprintf(stderr, "Desc next is %u", next);
        exit(1);
    }

    return next;
}
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int num_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    num_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        int i;

        i = virtqueue_get_head(vq, idx++);
        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > vq->vring.num) {
                fprintf(stderr, "Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(vq, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(vq, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);
    }

    return 0;
}
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head;
    target_phys_addr_t len;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are neither input nor output buffers. */
    elem->out_num = elem->in_num = 0;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    do {
        struct iovec *sg;
        int is_write = 0;

        if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(vq, i);
            sg = &elem->in_sg[elem->in_num++];
            is_write = 1;
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the first descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(vq, i);
        len = sg->iov_len;

        sg->iov_base = cpu_physical_memory_map(vring_desc_addr(vq, i), &len, is_write);

        if (sg->iov_base == NULL || len != sg->iov_len) {
            fprintf(stderr, "virtio: trying to map MMIO memory\n");
            exit(1);
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > vq->vring.num) {
            fprintf(stderr, "Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}
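
/* Typical device-side consumption loop (editor's sketch; 'process()' is a
 * hypothetical request handler, and a real device would run this from its
 * handle_output callback):
 *
 *     VirtQueueElement elem;
 *     while (virtqueue_pop(vq, &elem)) {
 *         unsigned int written = process(&elem);   // fill elem.in_sg
 *         virtqueue_push(vq, &elem, written);      // return buffer to guest
 *     }
 *     virtio_notify(vdev, vq);
 */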
/* virtio device */

static VirtIODevice *to_virtio_device(PCIDevice *pci_dev)
{
    return (VirtIODevice *)pci_dev;
}

static void virtio_update_irq(VirtIODevice *vdev)
{
    qemu_set_irq(vdev->pci_dev.irq[0], vdev->isr & 1);
}
static void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    if (vdev->reset)
        vdev->reset(vdev);

    vdev->features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    virtio_update_irq(vdev);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pfn = 0;
    }
}
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIODevice *vdev = to_virtio_device(opaque);
    ram_addr_t pa;

    addr -= vdev->addr;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        if (vdev->set_features)
            vdev->set_features(vdev, val);
        vdev->features = val;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (ram_addr_t)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        vdev->vq[vdev->queue_sel].pfn = val;
        if (pa == 0) {
            virtio_reset(vdev);
        } else {
            virtqueue_init(&vdev->vq[vdev->queue_sel], pa);
        }
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_PCI_QUEUE_MAX && vdev->vq[val].vring.desc)
            vdev->vq[val].handle_output(vdev, &vdev->vq[val]);
        break;
    case VIRTIO_PCI_STATUS:
        vdev->status = val & 0xFF;
        if (vdev->status == 0)
            virtio_reset(vdev);
        break;
    }
}
static uint32_t virtio_ioport_read(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = to_virtio_device(opaque);
    uint32_t ret = 0xFFFFFFFF;

    addr -= vdev->addr;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->get_features(vdev);
        ret |= (1 << VIRTIO_F_NOTIFY_ON_EMPTY);
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = vdev->vq[vdev->queue_sel].pfn;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = vdev->vq[vdev->queue_sel].vring.num;
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        virtio_update_irq(vdev);
        break;
    default:
        break;
    }

    return ret;
}
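
/* For reference (editor's note): a guest driver programs a queue through
 * these registers roughly as follows (guest-side pseudo-I/O):
 *
 *     outw(base + VIRTIO_PCI_QUEUE_SEL, n);          // select queue n
 *     num = inw(base + VIRTIO_PCI_QUEUE_NUM);        // read its size
 *     ...allocate num descriptors plus rings, 4096-byte aligned...
 *     outl(base + VIRTIO_PCI_QUEUE_PFN, addr >> 12); // hand ring to device
 *     ...later, after posting buffers...
 *     outw(base + VIRTIO_PCI_QUEUE_NOTIFY, n);       // kick the device
 *
 * Writing 0 to QUEUE_PFN, or 0 to STATUS, resets the device, as handled in
 * virtio_ioport_write() above. */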
static uint32_t virtio_config_readb(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static uint32_t virtio_config_readw(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static uint32_t virtio_config_readl(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}
static void virtio_config_writeb(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint8_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

static void virtio_config_writew(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint16_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

static void virtio_config_writel(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint32_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}
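
/* Editor's note: each read handler above refreshes the shadow buffer via
 * get_config() before extracting the requested bytes, and each write handler
 * pushes the updated shadow to the device via set_config(), so
 * 'vdev->config' never goes stale in either direction.  Accesses are
 * memcpy'd from/to the shadow, so they need not be aligned. */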
static void virtio_map(PCIDevice *pci_dev, int region_num,
                       uint32_t addr, uint32_t size, int type)
{
    VirtIODevice *vdev = to_virtio_device(pci_dev);
    int i;

    vdev->addr = addr;
    for (i = 0; i < 3; i++) {
        register_ioport_write(addr, 20, 1 << i, virtio_ioport_write, vdev);
        register_ioport_read(addr, 20, 1 << i, virtio_ioport_read, vdev);
    }

    if (vdev->config_len) {
        register_ioport_write(addr + 20, vdev->config_len, 1,
                              virtio_config_writeb, vdev);
        register_ioport_write(addr + 20, vdev->config_len, 2,
                              virtio_config_writew, vdev);
        register_ioport_write(addr + 20, vdev->config_len, 4,
                              virtio_config_writel, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 1,
                              virtio_config_readb, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 2,
                              virtio_config_readw, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 4,
                              virtio_config_readl, vdev);

        vdev->get_config(vdev, vdev->config);
    }
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}
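
/* Usage sketch (editor's example; names are hypothetical): a device
 * registers its queues right after virtio_init_pci(), e.g.
 *
 *     vdev->rx_vq = virtio_add_queue(vdev, 128, my_handle_rx);
 *
 * queue_size must not exceed VIRTQUEUE_MAX_SIZE, and at most
 * VIRTIO_PCI_QUEUE_MAX (16) queues can be registered per device; either
 * violation aborts. */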
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when the queue is empty, provided the guest acknowledged
     * the VIRTIO_F_NOTIFY_ON_EMPTY feature; otherwise honour the guest's
     * VRING_AVAIL_F_NO_INTERRUPT suppression flag. */
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
        (!(vdev->features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
        return;

    vdev->isr |= 0x01;
    virtio_update_irq(vdev);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    vdev->isr |= 0x03;
    virtio_update_irq(vdev);
}
void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    pci_device_save(&vdev->pci_dev, f);

    qemu_put_be32s(f, &vdev->addr);
    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be32s(f, &vdev->vq[i].pfn);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
    }
}
void virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i;

    pci_device_load(&vdev->pci_dev, f);

    qemu_get_be32s(f, &vdev->addr);
    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &vdev->features);
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        qemu_get_be32s(f, &vdev->vq[i].pfn);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pfn) {
            target_phys_addr_t pa;

            pa = (ram_addr_t)vdev->vq[i].pfn << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
            virtqueue_init(&vdev->vq[i], pa);
        }
    }

    virtio_update_irq(vdev);
}
VirtIODevice *virtio_init_pci(PCIBus *bus, const char *name,
                              uint16_t vendor, uint16_t device,
                              uint16_t subvendor, uint16_t subdevice,
                              uint16_t class_code, uint8_t pif,
                              size_t config_size, size_t struct_size)
{
    VirtIODevice *vdev;
    PCIDevice *pci_dev;
    uint8_t *config;
    uint32_t size;

    pci_dev = pci_register_device(bus, name, struct_size,
                                  -1, NULL, NULL);
    if (!pci_dev)
        return NULL;

    vdev = to_virtio_device(pci_dev);

    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);

    config = pci_dev->config;
    pci_config_set_vendor_id(config, vendor);
    pci_config_set_device_id(config, device);

    config[0x08] = VIRTIO_PCI_ABI_VERSION;  /* revision ID */

    config[0x09] = pif;                     /* programming interface */
    pci_config_set_class(config, class_code);
    config[0x0e] = 0x00;                    /* header type */

    config[0x2c] = subvendor & 0xFF;        /* subsystem vendor ID */
    config[0x2d] = (subvendor >> 8) & 0xFF;
    config[0x2e] = subdevice & 0xFF;        /* subsystem device ID */
    config[0x2f] = (subdevice >> 8) & 0xFF;

    config[0x3d] = 1;                       /* interrupt pin: INTA */

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);
    else
        vdev->config = NULL;

    size = 20 + config_size;
    if (size & (size - 1))
        size = 1 << qemu_fls(size);

    pci_register_io_region(pci_dev, 0, size, PCI_ADDRESS_SPACE_IO,
                           virtio_map);
    qemu_register_reset(virtio_reset, vdev);

    return vdev;
}
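
/* Call sketch (editor's example; the IDs shown are the conventional legacy
 * virtio values and 'MyDev' is hypothetical, neither is taken from this
 * file):
 *
 *     s = (MyDev *)virtio_init_pci(bus, "virtio-blk",
 *                                  0x1af4, 0x1001,   // Qumranet vendor/device
 *                                  0x1af4, 0x0002,   // subsys: VIRTIO_ID_BLOCK
 *                                  0x0180, 0x00,     // storage class, no pif
 *                                  sizeof(struct virtio_blk_config),
 *                                  sizeof(MyDev));
 *
 * The I/O BAR is sized to the 20-byte register block plus config_size,
 * rounded up to a power of two as PCI I/O regions require. */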