4 * Copyright IBM, Corp. 2007
7 * Anthony Liguori <aliguori@us.ibm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
14 #ifndef _QEMU_VIRTIO_H
15 #define _QEMU_VIRTIO_H
20 #include "sysemu/sysemu.h"
21 #include "qemu/event_notifier.h"
/* from Linux's linux/virtio_config.h */

/* Status byte for guest to report progress, and synchronize features. */
/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
#define VIRTIO_CONFIG_S_ACKNOWLEDGE     1
/* We have found a driver for the device. */
#define VIRTIO_CONFIG_S_DRIVER          2
/* Driver has used its parts of the config, and is happy */
#define VIRTIO_CONFIG_S_DRIVER_OK       4
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED          0x80
/* Some virtio feature bits (currently bits 28 through 31) are reserved for the
 * transport being used (eg. virtio_ring), the rest are per-device feature
 * bits. */
#define VIRTIO_TRANSPORT_F_START        28
#define VIRTIO_TRANSPORT_F_END          32

/* We notify when the ring is completely used, even if the guest is suppressing
 * interrupts. */
#define VIRTIO_F_NOTIFY_ON_EMPTY        24
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC     28
/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX         29
/* A guest should never accept this. It implies negotiation is broken. */
#define VIRTIO_F_BAD_FEATURE            30
/* from Linux's linux/virtio_ring.h */

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT       1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE      2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT   4

/* This means don't notify other side when buffer added. */
#define VRING_USED_F_NO_NOTIFY  1
/* This means don't interrupt guest when buffer consumed. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
72 static inline hwaddr
vring_align(hwaddr addr
,
75 return (addr
+ align
- 1) & ~(align
- 1);
/* Opaque virtqueue handle; the struct body is not defined in this header. */
typedef struct VirtQueue VirtQueue;

/* Maximum number of descriptors in a single virtqueue element. */
#define VIRTQUEUE_MAX_SIZE 1024
82 typedef struct VirtQueueElement
87 hwaddr in_addr
[VIRTQUEUE_MAX_SIZE
];
88 hwaddr out_addr
[VIRTQUEUE_MAX_SIZE
];
89 struct iovec in_sg
[VIRTQUEUE_MAX_SIZE
];
90 struct iovec out_sg
[VIRTQUEUE_MAX_SIZE
];
94 void (*notify
)(DeviceState
*d
, uint16_t vector
);
95 void (*save_config
)(DeviceState
*d
, QEMUFile
*f
);
96 void (*save_queue
)(DeviceState
*d
, int n
, QEMUFile
*f
);
97 int (*load_config
)(DeviceState
*d
, QEMUFile
*f
);
98 int (*load_queue
)(DeviceState
*d
, int n
, QEMUFile
*f
);
99 int (*load_done
)(DeviceState
*d
, QEMUFile
*f
);
100 unsigned (*get_features
)(DeviceState
*d
);
101 bool (*query_guest_notifiers
)(DeviceState
*d
);
102 int (*set_guest_notifiers
)(DeviceState
*d
, int nvqs
, bool assigned
);
103 int (*set_host_notifier
)(DeviceState
*d
, int n
, bool assigned
);
104 void (*vmstate_change
)(DeviceState
*d
, bool running
);
/* Upper bound on the number of virtqueues per device. */
#define VIRTIO_PCI_QUEUE_MAX 64

/* Sentinel uint16_t vector value meaning "no vector assigned". */
#define VIRTIO_NO_VECTOR 0xffff
    /*
     * NOTE(review): this copy of the header lost the opening and closing of
     * the enclosing struct (VirtIODevice in upstream QEMU) plus several
     * members; only the fields below survived extraction. Do not treat this
     * fragment as the complete struct layout.
     */
    uint32_t guest_features;    /* feature bits accepted by the guest */
    uint16_t config_vector;     /* vector used for config-change notification */
    /* Per-device-type hooks the virtio core calls into. */
    uint32_t (*get_features)(VirtIODevice *vdev, uint32_t requested_features);
    uint32_t (*bad_features)(VirtIODevice *vdev);
    void (*set_features)(VirtIODevice *vdev, uint32_t val);
    void (*get_config)(VirtIODevice *vdev, uint8_t *config);
    void (*set_config)(VirtIODevice *vdev, const uint8_t *config);
    void (*reset)(VirtIODevice *vdev);
    void (*set_status)(VirtIODevice *vdev, uint8_t val);
    /* Test and clear event pending status.
     * Should be called after unmask to avoid losing events.
     * If backend does not support masking,
     * must check in frontend instead.
     */
    bool (*guest_notifier_pending)(VirtIODevice *vdev, int n);
    /* Mask/unmask events from this vq. Any events reported
     * while masked will become pending.
     * If backend does not support masking,
     * must mask in frontend instead.
     */
    void (*guest_notifier_mask)(VirtIODevice *vdev, int n, bool mask);
    const VirtIOBindings *binding;  /* transport callbacks */
    DeviceState *binding_opaque;    /* transport device handed to callbacks */
    VMChangeStateEntry *vmstate;    /* VM state-change handler registration */
150 VirtQueue
*virtio_add_queue(VirtIODevice
*vdev
, int queue_size
,
151 void (*handle_output
)(VirtIODevice
*,
154 void virtqueue_push(VirtQueue
*vq
, const VirtQueueElement
*elem
,
/* Make the first @count filled elements visible in the used ring. */
void virtqueue_flush(VirtQueue *vq, unsigned int count);
/* Place @elem (with @len bytes written) at used-ring slot offset @idx,
 * without publishing it; pair with virtqueue_flush(). */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx);

/* Map @num_sg guest-physical addresses in @addr into host iovecs @sg. */
void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
    size_t num_sg, int is_write);
/* Pop the next available element into @elem; returns 0 when empty. */
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
/* Nonzero if the queue can supply @in_bytes readable and @out_bytes
 * writable buffer space. */
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes);
/* Report available in/out byte counts, scanning no further than the
 * given maxima. */
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes);

/* Interrupt the guest for @vq if the ring/event-idx rules require it. */
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);

/* Migration save/load of common virtio device state. */
void virtio_save(VirtIODevice *vdev, QEMUFile *f);

int virtio_load(VirtIODevice *vdev, QEMUFile *f);

/* Tear down common state allocated by virtio_common_init(). */
void virtio_cleanup(VirtIODevice *vdev);

/* Signal a configuration-space change to the guest. */
void virtio_notify_config(VirtIODevice *vdev);

/* Enable/disable "kick" notifications from the guest for this queue. */
void virtio_queue_set_notification(VirtQueue *vq, int enable);

/* Nonzero once the queue has been set up by the guest. */
int virtio_queue_ready(VirtQueue *vq);

/* Nonzero when no buffers are pending in the avail ring. */
int virtio_queue_empty(VirtQueue *vq);
/* Host binding interface. */

/* Allocate and initialize the common part of a virtio device.
 * @struct_size is the size of the device-specific struct embedding
 * VirtIODevice; @config_size the size of its config space. */
VirtIODevice *virtio_common_init(const char *name, uint16_t device_id,
                                 size_t config_size, size_t struct_size);
/* Byte/word/long accessors for the device config space at @addr. */
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr);
uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr);
uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr);
void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data);
void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data);
void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data);
/* Program / query the guest-physical base address of queue @n. */
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr);
hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n);
int virtio_queue_get_num(VirtIODevice *vdev, int n);
/* Process a guest kick on queue @n. */
void virtio_queue_notify(VirtIODevice *vdev, int n);
/* Interrupt vector bound to queue @n (VIRTIO_NO_VECTOR if none). */
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n);
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector);
/* Update the status byte (VIRTIO_CONFIG_S_* bits). */
void virtio_set_status(VirtIODevice *vdev, uint8_t val);
/* Reset handler; @opaque is the VirtIODevice. */
void virtio_reset(void *opaque);
void virtio_update_irq(VirtIODevice *vdev);
/* Negotiate guest feature bits; returns nonzero on bad features. */
int virtio_set_features(VirtIODevice *vdev, uint32_t val);

/* Attach a transport binding and its DeviceState to @vdev. */
void virtio_bind_device(VirtIODevice *vdev, const VirtIOBindings *binding,
                        DeviceState *opaque);
/* Per-device-type constructors. Config struct bodies live with each
 * device's implementation; only forward declarations are needed here. */
typedef struct VirtIOBlkConf VirtIOBlkConf;
VirtIODevice *virtio_blk_init(DeviceState *dev, VirtIOBlkConf *blk);
struct virtio_net_conf;
VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                              struct virtio_net_conf *net);
typedef struct virtio_serial_conf virtio_serial_conf;
VirtIODevice *virtio_serial_init(DeviceState *dev, virtio_serial_conf *serial);
VirtIODevice *virtio_balloon_init(DeviceState *dev);
typedef struct VirtIOSCSIConf VirtIOSCSIConf;
VirtIODevice *virtio_scsi_init(DeviceState *dev, VirtIOSCSIConf *conf);
typedef struct VirtIORNGConf VirtIORNGConf;
VirtIODevice *virtio_rng_init(DeviceState *dev, VirtIORNGConf *conf);
/* NOTE(review): upstream wraps this declaration in #ifdef CONFIG_VIRTFS;
 * the guard lines appear to have been lost in this copy — verify. */
VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf);

/* Matching destructors for the constructors above. */
void virtio_net_exit(VirtIODevice *vdev);
void virtio_blk_exit(VirtIODevice *vdev);
void virtio_serial_exit(VirtIODevice *vdev);
void virtio_balloon_exit(VirtIODevice *vdev);
void virtio_scsi_exit(VirtIODevice *vdev);
void virtio_rng_exit(VirtIODevice *vdev);
/* qdev property definitions for the feature bits every virtio device
 * shares: indirect descriptors and event-idx, both defaulting to on. */
#define DEFINE_VIRTIO_COMMON_FEATURES(_state, _field) \
    DEFINE_PROP_BIT("indirect_desc", _state, _field, \
                    VIRTIO_RING_F_INDIRECT_DESC, true), \
    DEFINE_PROP_BIT("event_idx", _state, _field, \
                    VIRTIO_RING_F_EVENT_IDX, true)
/* Guest-physical addresses of the three vring regions of queue @n,
 * plus the whole ring, and the size in bytes of each. */
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n);
/* Save/restore the last-seen avail index (used by migration). */
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
/* Index of @vq within its device. */
int virtio_queue_get_id(VirtQueue *vq);
/* Event notifier signalled toward the guest for this queue. */
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
253 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue
*vq
, bool assign
,
/* Event notifier the host side listens on for guest kicks to this queue. */
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
256 void virtio_queue_set_host_notifier_fd_handler(VirtQueue
*vq
, bool assign
,
/* Run the queue's output handler directly, given the VirtQueue itself. */
void virtio_queue_notify_vq(VirtQueue *vq);
/* Raise the device interrupt on behalf of @vq. */
void virtio_irq(VirtQueue *vq);