/*
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#ifndef _QEMU_VIRTIO_H
#define _QEMU_VIRTIO_H

#include "event_notifier.h"
/* from Linux's linux/virtio_config.h */

/* Status byte for guest to report progress, and synchronize features. */
/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
#define VIRTIO_CONFIG_S_ACKNOWLEDGE     1
/* We have found a driver for the device. */
#define VIRTIO_CONFIG_S_DRIVER          2
/* Driver has used its parts of the config, and is happy */
#define VIRTIO_CONFIG_S_DRIVER_OK       4
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED          0x80

/* Some virtio feature bits (currently bits 28 through 31) are reserved for the
 * transport being used (eg. virtio_ring), the rest are per-device feature bits. */
#define VIRTIO_TRANSPORT_F_START        28
#define VIRTIO_TRANSPORT_F_END          32
/* We notify when the ring is completely used, even if the guest is suppressing
 * callbacks */
#define VIRTIO_F_NOTIFY_ON_EMPTY        24
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC     28
/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX         29
/* A guest should never accept this. It implies negotiation is broken. */
#define VIRTIO_F_BAD_FEATURE            30
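
/*
 * Illustrative sketch: a device-specific get_features callback normally just
 * ORs its own feature bits into the mask it is handed; the transport bits
 * above (28-31) belong to the binding layer.  MY_DEVICE_F_SOME_FEATURE and
 * my_device_get_features are hypothetical names:
 *
 *     static uint32_t my_device_get_features(VirtIODevice *vdev,
 *                                            uint32_t requested_features)
 *     {
 *         requested_features |= (1 << MY_DEVICE_F_SOME_FEATURE);
 *         return requested_features;
 *     }
 */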
/* from Linux's linux/virtio_ring.h */

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT       1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE      2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT   4

/* This means don't notify other side when buffer added. */
#define VRING_USED_F_NO_NOTIFY  1
/* This means don't interrupt guest when buffer consumed. */
#define VRING_AVAIL_F_NO_INTERRUPT      1
static inline target_phys_addr_t vring_align(target_phys_addr_t addr,
                                             unsigned long align)
{
    return (addr + align - 1) & ~(align - 1);
}
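
/*
 * Worked example: vring_align(0x1234, 4096) yields 0x2000, because
 * (0x1234 + 0xfff) & ~0xfff == 0x2000.  The mask trick assumes align is a
 * power of two.
 */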
typedef struct VirtQueue VirtQueue;

#define VIRTQUEUE_MAX_SIZE 1024
typedef struct VirtQueueElement
{
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    target_phys_addr_t in_addr[VIRTQUEUE_MAX_SIZE];
    target_phys_addr_t out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElement;
typedef struct {
    void (*notify)(void * opaque, uint16_t vector);
    void (*save_config)(void * opaque, QEMUFile *f);
    void (*save_queue)(void * opaque, int n, QEMUFile *f);
    int (*load_config)(void * opaque, QEMUFile *f);
    int (*load_queue)(void * opaque, int n, QEMUFile *f);
    int (*load_done)(void * opaque, QEMUFile *f);
    unsigned (*get_features)(void * opaque);
    bool (*query_guest_notifiers)(void * opaque);
    int (*set_guest_notifiers)(void * opaque, bool assigned);
    int (*set_host_notifier)(void * opaque, int n, bool assigned);
    void (*vmstate_change)(void * opaque, bool running);
} VirtIOBindings;
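
/*
 * Illustrative sketch: a transport binding fills in the callbacks it
 * implements and registers the table with virtio_bind_device(), declared
 * below.  my_bindings, my_notify and proxy are hypothetical names:
 *
 *     static const VirtIOBindings my_bindings = {
 *         .notify = my_notify,
 *     };
 *     ...
 *     virtio_bind_device(vdev, &my_bindings, proxy);
 */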
#define VIRTIO_PCI_QUEUE_MAX 64

#define VIRTIO_NO_VECTOR 0xffff
typedef struct VirtIODevice VirtIODevice;

struct VirtIODevice
{
    uint32_t guest_features;
    uint16_t config_vector;
    uint32_t (*get_features)(VirtIODevice *vdev, uint32_t requested_features);
    uint32_t (*bad_features)(VirtIODevice *vdev);
    void (*set_features)(VirtIODevice *vdev, uint32_t val);
    void (*get_config)(VirtIODevice *vdev, uint8_t *config);
    void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
    void (*reset)(VirtIODevice *vdev);
    void (*set_status)(VirtIODevice *vdev, uint8_t val);
    const VirtIOBindings *binding;
    void *binding_opaque;
    VMChangeStateEntry *vmstate;
};
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *,
                                                  VirtQueue *));
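
/*
 * Illustrative sketch: a device registers its queues at init time and is
 * called back through handle_output when the guest kicks one of them.
 * my_handle_output is a hypothetical handler:
 *
 *     VirtQueue *vq = virtio_add_queue(vdev, 256, my_handle_output);
 */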
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len);
void virtqueue_flush(VirtQueue *vq, unsigned int count);
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx);

void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
                      size_t num_sg, int is_write);
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes);
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
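
/*
 * Illustrative sketch of the usual request loop inside a handle_output
 * callback: pop an element, consume its out_sg buffers and/or fill its in_sg
 * buffers, push it back with the number of bytes written, then notify the
 * guest.  process_request is a hypothetical helper:
 *
 *     static void my_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 *     {
 *         VirtQueueElement elem;
 *
 *         while (virtqueue_pop(vq, &elem)) {
 *             size_t written = process_request(&elem);
 *             virtqueue_push(vq, &elem, written);
 *         }
 *         virtio_notify(vdev, vq);
 *     }
 */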
void virtio_save(VirtIODevice *vdev, QEMUFile *f);

int virtio_load(VirtIODevice *vdev, QEMUFile *f);

void virtio_cleanup(VirtIODevice *vdev);

void virtio_notify_config(VirtIODevice *vdev);

void virtio_queue_set_notification(VirtQueue *vq, int enable);

int virtio_queue_ready(VirtQueue *vq);

int virtio_queue_empty(VirtQueue *vq);

/* Host binding interface. */
VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size);
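
/*
 * Illustrative sketch: a device model allocates its state through
 * virtio_common_init(), passing the size of its device-visible config space
 * and of its own state structure.  MyVirtIODev, MY_DEVICE_ID and
 * MY_CONFIG_SIZE are hypothetical; the cast assumes the embedded VirtIODevice
 * is the first field of MyVirtIODev:
 *
 *     MyVirtIODev *s = (MyVirtIODev *)virtio_common_init("my-virtio-dev",
 *                                                        MY_DEVICE_ID,
 *                                                        MY_CONFIG_SIZE,
 *                                                        sizeof(MyVirtIODev));
 */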
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr);
uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr);
uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr);
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data);
void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data);
void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data);
void virtio_queue_set_addr(VirtIODevice *vdev, int n, target_phys_addr_t addr);
target_phys_addr_t virtio_queue_get_addr(VirtIODevice *vdev, int n);
int virtio_queue_get_num(VirtIODevice *vdev, int n);
void virtio_queue_notify(VirtIODevice *vdev, int n);
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n);
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector);
void virtio_set_status(VirtIODevice *vdev, uint8_t val);
void virtio_reset(void *opaque);
void virtio_update_irq(VirtIODevice *vdev);
int virtio_set_features(VirtIODevice *vdev, uint32_t val);
void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        void *opaque);
typedef struct VirtIOBlkConf VirtIOBlkConf;
VirtIODevice *virtio_blk_init(DeviceState *dev, VirtIOBlkConf *blk);
struct virtio_net_conf;
VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                              struct virtio_net_conf *net);
typedef struct virtio_serial_conf virtio_serial_conf;
VirtIODevice *virtio_serial_init(DeviceState *dev, virtio_serial_conf *serial);
VirtIODevice *virtio_balloon_init(DeviceState *dev);
typedef struct VirtIOSCSIConf VirtIOSCSIConf;
VirtIODevice *virtio_scsi_init(DeviceState *dev, VirtIOSCSIConf *conf);
VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf);

void virtio_net_exit(VirtIODevice *vdev);
void virtio_blk_exit(VirtIODevice *vdev);
void virtio_serial_exit(VirtIODevice *vdev);
void virtio_balloon_exit(VirtIODevice *vdev);
void virtio_scsi_exit(VirtIODevice *vdev);
#define DEFINE_VIRTIO_COMMON_FEATURES(_state, _field) \
    DEFINE_PROP_BIT("indirect_desc", _state, _field, \
                    VIRTIO_RING_F_INDIRECT_DESC, true), \
    DEFINE_PROP_BIT("event_idx", _state, _field, \
                    VIRTIO_RING_F_EVENT_IDX, true)
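
/*
 * Illustrative sketch: the macro is meant to be dropped into a transport's
 * qdev property array so the common ring features can be toggled from the
 * command line.  MyProxyState and host_features are hypothetical here:
 *
 *     static Property my_virtio_properties[] = {
 *         DEFINE_VIRTIO_COMMON_FEATURES(MyProxyState, host_features),
 *         DEFINE_PROP_END_OF_LIST(),
 *     };
 */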
target_phys_addr_t virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
target_phys_addr_t virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
target_phys_addr_t virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
target_phys_addr_t virtio_queue_get_ring_addr(VirtIODevice *vdev, int n);
target_phys_addr_t virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
target_phys_addr_t virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
target_phys_addr_t virtio_queue_get_used_size(VirtIODevice *vdev, int n);
target_phys_addr_t virtio_queue_get_ring_size(VirtIODevice *vdev, int n);
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
int virtio_queue_get_id(VirtQueue *vq);
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd);
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler);
void virtio_queue_notify_vq(VirtQueue *vq);
void virtio_irq(VirtQueue *vq);

#endif