/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/* includes reconstructed from usage below: errx() needs <err.h> */
#include <inttypes.h>
#include <err.h>

#include "virtio.h"

//#define VIRTIO_ZERO_COPY

/* from Linux's linux/virtio_pci.h */

/* A 32-bit r/o bitmask of the features supported by the host */
#define VIRTIO_PCI_HOST_FEATURES 0

/* A 32-bit r/w bitmask of features activated by the guest */
#define VIRTIO_PCI_GUEST_FEATURES 4

/* A 32-bit r/w PFN for the currently selected queue */
#define VIRTIO_PCI_QUEUE_PFN 8

/* A 16-bit r/o queue size for the currently selected queue */
#define VIRTIO_PCI_QUEUE_NUM 12

/* A 16-bit r/w queue selector */
#define VIRTIO_PCI_QUEUE_SEL 14

/* A 16-bit r/w queue notifier */
#define VIRTIO_PCI_QUEUE_NOTIFY 16

/* An 8-bit device status register. */
#define VIRTIO_PCI_STATUS 18

/* An 8-bit r/o interrupt status register.  Reading the value will return
 * the current contents of the ISR and will also clear it.  This is
 * effectively a read-and-acknowledge. */
#define VIRTIO_PCI_ISR 19

#define VIRTIO_PCI_CONFIG 20

/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION 0

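/* Quick reference: the I/O region layout implied by the offsets above.
 *
 *   offset  width  register
 *   0       4      host features    (r/o)
 *   4       4      guest features   (r/w)
 *   8       4      queue PFN        (r/w)
 *   12      2      queue size       (r/o)
 *   14      2      queue selector   (r/w)
 *   16      2      queue notify     (r/w)
 *   18      1      device status    (r/w)
 *   19      1      ISR              (r/o, read-and-acknowledge)
 *   20      -      device-specific configuration space
 */
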
/* QEMU doesn't strictly need write barriers since everything runs in
 * lock-step.  We'll leave the calls to wmb() in though to make it obvious for
 * KVM or if kqemu gets SMP support.
 */
#define wmb() do { } while (0)

/* struct bodies reconstructed from the offsetof()/ldq/ldl/lduw accessors
 * below and the virtio ring ABI */
typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    target_phys_addr_t desc;
    target_phys_addr_t avail;
    target_phys_addr_t used;
} VRing;

struct VirtQueue
{
    VRing vring;
    uint32_t pfn;
    uint16_t last_avail_idx;
    unsigned int inuse;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
};

#define VIRTIO_PCI_QUEUE_MAX 16

/* virt queue functions */
#ifdef VIRTIO_ZERO_COPY
static void *virtio_map_gpa(target_phys_addr_t addr, size_t size)
{
    ram_addr_t off, off1;
    target_phys_addr_t addr1;

    off = cpu_get_physical_page_desc(addr);
    if ((off & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        fprintf(stderr, "virtio DMA to IO ram\n");
        return NULL;
    }

    off = (off & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK);

    /* the mapping is only usable if every page the buffer touches is
     * contiguous RAM, so walk and check each one */
    for (addr1 = addr + TARGET_PAGE_SIZE;
         addr1 < TARGET_PAGE_ALIGN(addr + size);
         addr1 += TARGET_PAGE_SIZE) {
        off1 = cpu_get_physical_page_desc(addr1);
        if ((off1 & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            fprintf(stderr, "virtio DMA to IO ram\n");
            return NULL;
        }

        off1 = (off1 & TARGET_PAGE_MASK) | (addr1 & ~TARGET_PAGE_MASK);

        if (off1 != (off + (addr1 - addr))) {
            fprintf(stderr, "discontiguous virtio memory\n");
            return NULL;
        }
    }

    return phys_ram_base + off;
}
#endif

static void virtqueue_init(VirtQueue *vq, target_phys_addr_t pa)
{
    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = TARGET_PAGE_ALIGN(vq->vring.avail +
                                       offsetof(VRingAvail, ring[vq->vring.num]));
}

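/* Worked example of the layout above, assuming 4K pages and a queue of
 * num = 256 descriptors (illustrative numbers only):
 *
 *   desc  = pa                     (256 * sizeof(VRingDesc) = 4096 bytes)
 *   avail = pa + 4096              (4 + 256 * 2 = 516 bytes)
 *   used  = align(pa + 4612, 4K)   = pa + 8192
 *
 * The guest driver computes the same layout from the queue size it reads
 * at VIRTIO_PCI_QUEUE_NUM, so both sides agree on where each part lives.
 */
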
static inline uint64_t vring_desc_addr(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.desc + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

#ifndef VIRTIO_ZERO_COPY
    for (i = 0; i < elem->out_num; i++)
        qemu_free(elem->out_sg[i].iov_base);
#endif

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

#ifdef VIRTIO_ZERO_COPY
        if (size) {
            ram_addr_t addr = (uint8_t *)elem->in_sg[i].iov_base - phys_ram_base;
            ram_addr_t off;

            for (off = 0; off < size; off += TARGET_PAGE_SIZE)
                cpu_physical_memory_set_dirty(addr + off);
        }
#else
        if (size)
            cpu_physical_memory_write(elem->in_addr[i],
                                      elem->in_sg[i].iov_base,
                                      size);

        qemu_free(elem->in_sg[i].iov_base);
#endif

        offset += size;
    }

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Get a pointer to the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    wmb();
    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num)
        errx(1, "Guest moved used index from %u to %u",
             idx, vring_avail_idx(vq));

    return num_heads;
}

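/* Example of the index arithmetic (hypothetical values): if the guest has
 * published avail->idx == 5 and the device has consumed entries up to
 * last_avail_idx == 3, virtqueue_num_heads() returns 5 - 3 = 2 pending
 * heads.  Both indices grow without wrapping, so the uint16_t subtraction
 * stays correct across 16-bit overflow as long as the gap never exceeds
 * vring.num (which the errx() above treats as fatal).
 */
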
static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num)
        errx(1, "Guest says index %u is available", head);

    return head;
}

static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned int i)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(vq, i) & VRING_DESC_F_NEXT))
        return vq->vring.num;

    /* Check they're not leading us off end of descriptors. */
    next = vring_desc_next(vq, i);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    wmb();

    if (next >= vq->vring.num)
        errx(1, "Desc next is %u", next);

    return next;
}

int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
{
    unsigned int idx;
    int num_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    num_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        int i;

        i = virtqueue_get_head(vq, idx++);
        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > vq->vring.num)
                errx(1, "Looped descriptor");

            if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
                if (in_bytes > 0 &&
                    (in_total += vring_desc_len(vq, i)) >= in_bytes)
                    return 1;
            } else {
                if (out_bytes > 0 &&
                    (out_total += vring_desc_len(vq, i)) >= out_bytes)
                    return 1;
            }
        } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);
    }

    return 0;
}

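/* Usage note (an assumption about callers, not taken from this file): a
 * device that needs a buffer of known size before committing to a pop can
 * ask e.g. virtqueue_avail_bytes(vq, bufsize, 0) to check that at least
 * bufsize bytes of writable (device-to-guest) descriptor space are queued.
 */
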
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx))
        return 0;

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    do {
        struct iovec *sg;

        if (vring_desc_flags(vq, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(vq, i);
            sg = &elem->in_sg[elem->in_num++];
        } else
            sg = &elem->out_sg[elem->out_num++];

        /* Grab the first descriptor, and check it's OK. */
        sg->iov_len = vring_desc_len(vq, i);

#ifdef VIRTIO_ZERO_COPY
        sg->iov_base = virtio_map_gpa(vring_desc_addr(vq, i), sg->iov_len);
#else
        /* cap individual scatter element size to prevent unbounded
         * allocations of memory from the guest.  Practically speaking,
         * no virtio driver will ever pass more than a page in each
         * element.  We set the cap to be 2MB in case for some reason a
         * large page makes its way into the sg list.  When we implement
         * a zero copy API, this limitation will disappear. */
        if (sg->iov_len > (2 << 20))
            sg->iov_len = 2 << 20;

        sg->iov_base = qemu_malloc(sg->iov_len);
        if (sg->iov_base &&
            !(vring_desc_flags(vq, i) & VRING_DESC_F_WRITE)) {
            cpu_physical_memory_read(vring_desc_addr(vq, i),
                                     sg->iov_base,
                                     sg->iov_len);
        }
#endif
        if (sg->iov_base == NULL)
            errx(1, "Invalid mapping");

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > vq->vring.num)
            errx(1, "Looped descriptor");
    } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}

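/* Usage sketch (hypothetical device code, not part of this file): a
 * handle_output callback typically drains the avail ring with
 * virtqueue_pop(), completes each element with virtqueue_push(), and
 * finally raises the interrupt:
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement elem;
 *
 *         while (virtqueue_pop(vq, &elem)) {
 *             size_t len = my_process(&elem);   // consume out_sg, fill in_sg
 *             virtqueue_push(vq, &elem, len);
 *         }
 *         virtio_notify(vdev, vq);
 *     }
 *
 * my_process() stands in for whatever the device does with the buffers.
 */
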
static VirtIODevice *to_virtio_device(PCIDevice *pci_dev)
{
    return (VirtIODevice *)pci_dev;
}

static void virtio_update_irq(VirtIODevice *vdev)
{
    qemu_set_irq(vdev->pci_dev.irq[0], vdev->isr & 1);
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    vdev->features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    virtio_update_irq(vdev);

    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pfn = 0;
    }
}

static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIODevice *vdev = to_virtio_device(opaque);
    ram_addr_t pa;

    addr -= vdev->addr;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        if (vdev->set_features)
            vdev->set_features(vdev, val);
        vdev->features = val;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (ram_addr_t)val << TARGET_PAGE_BITS;
        vdev->vq[vdev->queue_sel].pfn = val;
        if (pa == 0) {
            virtio_reset(vdev);
        } else
            virtqueue_init(&vdev->vq[vdev->queue_sel], pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_PCI_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_PCI_QUEUE_MAX && vdev->vq[val].vring.desc)
            vdev->vq[val].handle_output(vdev, &vdev->vq[val]);
        break;
    case VIRTIO_PCI_STATUS:
        vdev->status = val & 0xFF;
        if (vdev->status == 0)
            virtio_reset(vdev);
        break;
    }
}

static uint32_t virtio_ioport_read(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = to_virtio_device(opaque);
    uint32_t ret = 0xFFFFFFFF;

    addr -= vdev->addr;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->get_features(vdev);
        ret |= (1 << VIRTIO_F_NOTIFY_ON_EMPTY);
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = vdev->vq[vdev->queue_sel].pfn;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = vdev->vq[vdev->queue_sel].vring.num;
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = vdev->isr;
        vdev->isr = 0;
        virtio_update_irq(vdev);
        break;
    default:
        break;
    }

    return ret;
}

static uint32_t virtio_config_readb(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint8_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static uint32_t virtio_config_readw(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint16_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static uint32_t virtio_config_readl(void *opaque, uint32_t addr)
{
    VirtIODevice *vdev = opaque;
    uint32_t val;

    vdev->get_config(vdev, vdev->config);

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return (uint32_t)-1;

    memcpy(&val, vdev->config + addr, sizeof(val));
    return val;
}

static void virtio_config_writeb(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint8_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

static void virtio_config_writew(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint16_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

static void virtio_config_writel(void *opaque, uint32_t addr, uint32_t data)
{
    VirtIODevice *vdev = opaque;
    uint32_t val = data;

    addr -= vdev->addr + VIRTIO_PCI_CONFIG;
    if (addr > (vdev->config_len - sizeof(val)))
        return;

    memcpy(vdev->config + addr, &val, sizeof(val));

    if (vdev->set_config)
        vdev->set_config(vdev, vdev->config);
}

static void virtio_map(PCIDevice *pci_dev, int region_num,
                       uint32_t addr, uint32_t size, int type)
{
    VirtIODevice *vdev = to_virtio_device(pci_dev);
    int i;

    vdev->addr = addr;
    for (i = 0; i < 3; i++) {
        register_ioport_write(addr, 20, 1 << i, virtio_ioport_write, vdev);
        register_ioport_read(addr, 20, 1 << i, virtio_ioport_read, vdev);
    }

    if (vdev->config_len) {
        register_ioport_write(addr + 20, vdev->config_len, 1,
                              virtio_config_writeb, vdev);
        register_ioport_write(addr + 20, vdev->config_len, 2,
                              virtio_config_writew, vdev);
        register_ioport_write(addr + 20, vdev->config_len, 4,
                              virtio_config_writel, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 1,
                             virtio_config_readb, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 2,
                             virtio_config_readw, vdev);
        register_ioport_read(addr + 20, vdev->config_len, 4,
                             virtio_config_readl, vdev);

        vdev->get_config(vdev, vdev->config);
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    /* find a free slot in the queue table */
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
        abort();

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Always notify when queue is empty */
    if ((vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx) &&
        (vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT))
        return;

    vdev->isr |= 0x01;
    virtio_update_irq(vdev);
}

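/* In other words, the interrupt is suppressed only when the guest set
 * VRING_AVAIL_F_NO_INTERRUPT *and* the queue still has work outstanding;
 * an empty queue always notifies, so a guest waiting for the ring to
 * drain is never left hanging.
 */
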
void virtio_notify_config(VirtIODevice *vdev)
{
    vdev->isr |= 0x03;
    virtio_update_irq(vdev);
}

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    int i;

    pci_device_save(&vdev->pci_dev, f);

    qemu_put_be32s(f, &vdev->addr);
    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    /* count the queues in use, then save each one's state */
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0)
            break;

        qemu_put_be32(f, vdev->vq[i].vring.num);
        qemu_put_be32s(f, &vdev->vq[i].pfn);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
    }
}

void virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int num, i;

    pci_device_load(&vdev->pci_dev, f);

    qemu_get_be32s(f, &vdev->addr);
    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    qemu_get_be32s(f, &vdev->features);
    vdev->config_len = qemu_get_be32(f);
    qemu_get_buffer(f, vdev->config, vdev->config_len);

    num = qemu_get_be32(f);

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        qemu_get_be32s(f, &vdev->vq[i].pfn);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);

        if (vdev->vq[i].pfn) {
            target_phys_addr_t pa;

            pa = (ram_addr_t)vdev->vq[i].pfn << TARGET_PAGE_BITS;
            virtqueue_init(&vdev->vq[i], pa);
        }
    }

    virtio_update_irq(vdev);
}

VirtIODevice *virtio_init_pci(PCIBus *bus, const char *name,
                              uint16_t vendor, uint16_t device,
                              uint16_t subvendor, uint16_t subdevice,
                              uint8_t class_code, uint8_t subclass_code,
                              uint8_t pif, size_t config_size,
                              size_t struct_size)
{
    VirtIODevice *vdev;
    PCIDevice *pci_dev;
    uint8_t *config;
    uint32_t size;

    pci_dev = pci_register_device(bus, name, struct_size,
                                  -1, NULL, NULL);
    if (!pci_dev)
        return NULL;

    vdev = to_virtio_device(pci_dev);

    vdev->vq = qemu_mallocz(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);

    config = pci_dev->config;
    config[0x00] = vendor & 0xFF;
    config[0x01] = (vendor >> 8) & 0xFF;
    config[0x02] = device & 0xFF;
    config[0x03] = (device >> 8) & 0xFF;

    config[0x08] = VIRTIO_PCI_ABI_VERSION;

    config[0x09] = pif;
    config[0x0a] = subclass_code;
    config[0x0b] = class_code;

    config[0x2c] = subvendor & 0xFF;
    config[0x2d] = (subvendor >> 8) & 0xFF;
    config[0x2e] = subdevice & 0xFF;
    config[0x2f] = (subdevice >> 8) & 0xFF;

    /* interrupt pin A */
    config[0x3d] = 1;

    vdev->config_len = config_size;
    if (vdev->config_len)
        vdev->config = qemu_mallocz(config_size);

    /* region 0 covers the common registers plus the config space;
     * round the BAR size up to a power of two as PCI requires */
    size = 20 + config_size;
    if (size & (size - 1))
        size = 1 << fls(size);

    pci_register_io_region(pci_dev, 0, size, PCI_ADDRESS_SPACE_IO,
                           virtio_map);
    qemu_register_reset(virtio_reset, vdev);

    return vdev;
}
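
/* Registration sketch (hypothetical device, illustrative IDs only --
 * 0x1af4 is the Qumranet/virtio PCI vendor ID):
 *
 *     vdev = virtio_init_pci(bus, "virtio-example",
 *                            0x1af4, 0x1000,    // vendor, device
 *                            0x1af4, 0x0001,    // subvendor, subdevice
 *                            0x02, 0x00, 0x00,  // class, subclass, pif
 *                            sizeof(struct example_config),
 *                            sizeof(ExampleState));
 *     vdev->get_features = example_get_features;
 *     vdev->get_config = example_get_config;
 *     virtio_add_queue(vdev, 256, example_handle_output);
 *
 * get_features and get_config should be set before the guest maps the BAR,
 * since virtio_ioport_read() and virtio_map() call them unconditionally.
 */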