/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qapi-commands-virtio.h"
#include "qapi/qapi-commands-qom.h"
#include "qapi/qapi-visit-virtio.h"
#include "qapi/qmp/qjson.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qom/object_interfaces.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/vhost_types.h"
#include "standard-headers/linux/virtio_blk.h"
#include "standard-headers/linux/virtio_console.h"
#include "standard-headers/linux/virtio_gpu.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_scsi.h"
#include "standard-headers/linux/virtio_i2c.h"
#include "standard-headers/linux/virtio_balloon.h"
#include "standard-headers/linux/virtio_iommu.h"
#include "standard-headers/linux/virtio_mem.h"
#include "standard-headers/linux/virtio_vsock.h"
#include CONFIG_DEVICES

/* QAPI list of realized VirtIODevices */
static QTAILQ_HEAD(, VirtIODevice) virtio_list;

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

#define FEATURE_ENTRY(name, desc) (qmp_virtio_feature_map_t) \
    { .virtio_bit = name, .feature_desc = desc }

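/*
 * For example, FEATURE_ENTRY(VIRTIO_F_VERSION_1, "desc") expands to the
 * compound literal
 *
 *     (qmp_virtio_feature_map_t) { .virtio_bit = VIRTIO_F_VERSION_1,
 *                                  .feature_desc = "desc" }
 *
 * so each map below is just an array of bit/description pairs terminated
 * by a { -1, "" } sentinel.
 */
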
enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
    VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
    VHOST_USER_PROTOCOL_F_MAX
};

/* Virtio transport features mapping */
static qmp_virtio_feature_map_t virtio_transport_map[] = {
    /* Virtio device transport features */
#ifndef VIRTIO_CONFIG_NO_LEGACY
    FEATURE_ENTRY(VIRTIO_F_NOTIFY_ON_EMPTY, \
            "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. "
            "descs. on VQ"),
    FEATURE_ENTRY(VIRTIO_F_ANY_LAYOUT, \
            "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts"),
#endif /* !VIRTIO_CONFIG_NO_LEGACY */
    FEATURE_ENTRY(VIRTIO_F_VERSION_1, \
            "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)"),
    FEATURE_ENTRY(VIRTIO_F_IOMMU_PLATFORM, \
            "VIRTIO_F_IOMMU_PLATFORM: Device can be used on IOMMU platform"),
    FEATURE_ENTRY(VIRTIO_F_RING_PACKED, \
            "VIRTIO_F_RING_PACKED: Device supports packed VQ layout"),
    FEATURE_ENTRY(VIRTIO_F_IN_ORDER, \
            "VIRTIO_F_IN_ORDER: Device uses buffers in same order as made "
            "available by driver"),
    FEATURE_ENTRY(VIRTIO_F_ORDER_PLATFORM, \
            "VIRTIO_F_ORDER_PLATFORM: Memory accesses ordered by platform"),
    FEATURE_ENTRY(VIRTIO_F_SR_IOV, \
            "VIRTIO_F_SR_IOV: Device supports single root I/O virtualization"),
    /* Virtio ring transport features */
    FEATURE_ENTRY(VIRTIO_RING_F_INDIRECT_DESC, \
            "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported"),
    FEATURE_ENTRY(VIRTIO_RING_F_EVENT_IDX, \
            "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled"),
    { -1, "" }
};

/* Vhost-user protocol features mapping */
static qmp_virtio_feature_map_t vhost_user_protocol_map[] = {
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_MQ, \
            "VHOST_USER_PROTOCOL_F_MQ: Multiqueue protocol supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_LOG_SHMFD, \
            "VHOST_USER_PROTOCOL_F_LOG_SHMFD: Shared log memory fd supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RARP, \
            "VHOST_USER_PROTOCOL_F_RARP: Vhost-user back-end RARP broadcasting "
            "supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_REPLY_ACK, \
            "VHOST_USER_PROTOCOL_F_REPLY_ACK: Requested operation status ack. "
            "supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_NET_MTU, \
            "VHOST_USER_PROTOCOL_F_NET_MTU: Expose host MTU to guest supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_REQ, \
            "VHOST_USER_PROTOCOL_F_SLAVE_REQ: Socket fd for back-end initiated "
            "requests supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CROSS_ENDIAN, \
            "VHOST_USER_PROTOCOL_F_CROSS_ENDIAN: Endianness of VQs for legacy "
            "devices supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CRYPTO_SESSION, \
            "VHOST_USER_PROTOCOL_F_CRYPTO_SESSION: Session creation for crypto "
            "operations supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_PAGEFAULT, \
            "VHOST_USER_PROTOCOL_F_PAGEFAULT: Request servicing on userfaultfd "
            "for accessed pages supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIG, \
            "VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio "
            "device configuration space supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD, \
            "VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD: Slave fd communication "
            "channel supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \
            "VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified "
            "VQs supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD, \
            "VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD: Shared inflight I/O buffers "
            "supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RESET_DEVICE, \
            "VHOST_USER_PROTOCOL_F_RESET_DEVICE: Disabling all rings and "
            "resetting internal device state supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS, \
            "VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS: In-band messaging "
            "supported"),
    FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS, \
            "VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS: Configuration for "
            "memory slots supported"),
    { -1, "" }
};

/* virtio device configuration statuses */
static qmp_virtio_feature_map_t virtio_config_status_map[] = {
    FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER_OK, \
            "VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready"),
    FEATURE_ENTRY(VIRTIO_CONFIG_S_FEATURES_OK, \
            "VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete"),
    FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER, \
            "VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device"),
    FEATURE_ENTRY(VIRTIO_CONFIG_S_NEEDS_RESET, \
            "VIRTIO_CONFIG_S_NEEDS_RESET: Irrecoverable error, device needs "
            "reset"),
    FEATURE_ENTRY(VIRTIO_CONFIG_S_FAILED, \
            "VIRTIO_CONFIG_S_FAILED: Error in guest, device failed"),
    FEATURE_ENTRY(VIRTIO_CONFIG_S_ACKNOWLEDGE, \
            "VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found"),
    { -1, "" }
};

/* virtio-blk features mapping */
qmp_virtio_feature_map_t virtio_blk_feature_map[] = {
    FEATURE_ENTRY(VIRTIO_BLK_F_SIZE_MAX, \
            "VIRTIO_BLK_F_SIZE_MAX: Max segment size is size_max"),
    FEATURE_ENTRY(VIRTIO_BLK_F_SEG_MAX, \
            "VIRTIO_BLK_F_SEG_MAX: Max segments in a request is seg_max"),
    FEATURE_ENTRY(VIRTIO_BLK_F_GEOMETRY, \
            "VIRTIO_BLK_F_GEOMETRY: Legacy geometry available"),
    FEATURE_ENTRY(VIRTIO_BLK_F_RO, \
            "VIRTIO_BLK_F_RO: Device is read-only"),
    FEATURE_ENTRY(VIRTIO_BLK_F_BLK_SIZE, \
            "VIRTIO_BLK_F_BLK_SIZE: Block size of disk available"),
    FEATURE_ENTRY(VIRTIO_BLK_F_TOPOLOGY, \
            "VIRTIO_BLK_F_TOPOLOGY: Topology information available"),
    FEATURE_ENTRY(VIRTIO_BLK_F_MQ, \
            "VIRTIO_BLK_F_MQ: Multiqueue supported"),
    FEATURE_ENTRY(VIRTIO_BLK_F_DISCARD, \
            "VIRTIO_BLK_F_DISCARD: Discard command supported"),
    FEATURE_ENTRY(VIRTIO_BLK_F_WRITE_ZEROES, \
            "VIRTIO_BLK_F_WRITE_ZEROES: Write zeroes command supported"),
#ifndef VIRTIO_BLK_NO_LEGACY
    FEATURE_ENTRY(VIRTIO_BLK_F_BARRIER, \
            "VIRTIO_BLK_F_BARRIER: Request barriers supported"),
    FEATURE_ENTRY(VIRTIO_BLK_F_SCSI, \
            "VIRTIO_BLK_F_SCSI: SCSI packet commands supported"),
    FEATURE_ENTRY(VIRTIO_BLK_F_FLUSH, \
            "VIRTIO_BLK_F_FLUSH: Flush command supported"),
    FEATURE_ENTRY(VIRTIO_BLK_F_CONFIG_WCE, \
            "VIRTIO_BLK_F_CONFIG_WCE: Cache writeback and writethrough modes "
            "supported"),
#endif /* !VIRTIO_BLK_NO_LEGACY */
    FEATURE_ENTRY(VHOST_F_LOG_ALL, \
            "VHOST_F_LOG_ALL: Logging write descriptors supported"),
    FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
            "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
            "negotiation supported"),
    { -1, "" }
};

/* virtio-serial features mapping */
qmp_virtio_feature_map_t virtio_serial_feature_map[] = {
    FEATURE_ENTRY(VIRTIO_CONSOLE_F_SIZE, \
            "VIRTIO_CONSOLE_F_SIZE: Host providing console size"),
    FEATURE_ENTRY(VIRTIO_CONSOLE_F_MULTIPORT, \
            "VIRTIO_CONSOLE_F_MULTIPORT: Multiple ports for device supported"),
    FEATURE_ENTRY(VIRTIO_CONSOLE_F_EMERG_WRITE, \
            "VIRTIO_CONSOLE_F_EMERG_WRITE: Emergency write supported"),
    { -1, "" }
};

/* virtio-gpu features mapping */
qmp_virtio_feature_map_t virtio_gpu_feature_map[] = {
    FEATURE_ENTRY(VIRTIO_GPU_F_VIRGL, \
            "VIRTIO_GPU_F_VIRGL: Virgl 3D mode supported"),
    FEATURE_ENTRY(VIRTIO_GPU_F_EDID, \
            "VIRTIO_GPU_F_EDID: EDID metadata supported"),
    FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_UUID, \
            "VIRTIO_GPU_F_RESOURCE_UUID: Resource UUID assigning supported"),
    FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_BLOB, \
            "VIRTIO_GPU_F_RESOURCE_BLOB: Size-based blob resources supported"),
    FEATURE_ENTRY(VIRTIO_GPU_F_CONTEXT_INIT, \
            "VIRTIO_GPU_F_CONTEXT_INIT: Context types and synchronization "
            "timelines supported"),
    FEATURE_ENTRY(VHOST_F_LOG_ALL, \
            "VHOST_F_LOG_ALL: Logging write descriptors supported"),
    FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
            "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
            "negotiation supported"),
    { -1, "" }
};

/* virtio-input features mapping */
qmp_virtio_feature_map_t virtio_input_feature_map[] = {
    FEATURE_ENTRY(VHOST_F_LOG_ALL, \
            "VHOST_F_LOG_ALL: Logging write descriptors supported"),
    FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
            "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
            "negotiation supported"),
    { -1, "" }
};

/* virtio-net features mapping */
qmp_virtio_feature_map_t virtio_net_feature_map[] = {
    FEATURE_ENTRY(VIRTIO_NET_F_CSUM, \
            "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum "
            "supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_GUEST_CSUM, \
            "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial "
            "checksum supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
            "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading "
            "reconfig. supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_MTU, \
            "VIRTIO_NET_F_MTU: Device max MTU reporting supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_MAC, \
            "VIRTIO_NET_F_MAC: Device has given MAC address"),
    FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO4, \
            "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4"),
    FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO6, \
            "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6"),
    FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ECN, \
            "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN"),
    FEATURE_ENTRY(VIRTIO_NET_F_GUEST_UFO, \
            "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO"),
    FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO4, \
            "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4"),
    FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO6, \
            "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6"),
    FEATURE_ENTRY(VIRTIO_NET_F_HOST_ECN, \
            "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN"),
    FEATURE_ENTRY(VIRTIO_NET_F_HOST_UFO, \
            "VIRTIO_NET_F_HOST_UFO: Device can receive UFO"),
    FEATURE_ENTRY(VIRTIO_NET_F_MRG_RXBUF, \
            "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers"),
    FEATURE_ENTRY(VIRTIO_NET_F_STATUS, \
            "VIRTIO_NET_F_STATUS: Configuration status field available"),
    FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VQ, \
            "VIRTIO_NET_F_CTRL_VQ: Control channel available"),
    FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX, \
            "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VLAN, \
            "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX_EXTRA, \
            "VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ANNOUNCE, \
            "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets "
            "supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_MQ, \
            "VIRTIO_NET_F_MQ: Multiqueue with automatic receive steering "
            "supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_CTRL_MAC_ADDR, \
            "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control "
            "channel"),
    FEATURE_ENTRY(VIRTIO_NET_F_HASH_REPORT, \
            "VIRTIO_NET_F_HASH_REPORT: Hash reporting supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_RSS, \
            "VIRTIO_NET_F_RSS: RSS RX steering supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_RSC_EXT, \
            "VIRTIO_NET_F_RSC_EXT: Extended coalescing info supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_STANDBY, \
            "VIRTIO_NET_F_STANDBY: Device acting as standby for primary "
            "device with same MAC addr. supported"),
    FEATURE_ENTRY(VIRTIO_NET_F_SPEED_DUPLEX, \
            "VIRTIO_NET_F_SPEED_DUPLEX: Device sets linkspeed and duplex"),
#ifndef VIRTIO_NET_NO_LEGACY
    FEATURE_ENTRY(VIRTIO_NET_F_GSO, \
            "VIRTIO_NET_F_GSO: Handling GSO-type packets supported"),
#endif /* !VIRTIO_NET_NO_LEGACY */
    FEATURE_ENTRY(VHOST_NET_F_VIRTIO_NET_HDR, \
            "VHOST_NET_F_VIRTIO_NET_HDR: Virtio-net headers for RX and TX "
            "packets supported"),
    FEATURE_ENTRY(VHOST_F_LOG_ALL, \
            "VHOST_F_LOG_ALL: Logging write descriptors supported"),
    FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
            "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
            "negotiation supported"),
    { -1, "" }
};

/* virtio-scsi features mapping */
qmp_virtio_feature_map_t virtio_scsi_feature_map[] = {
    FEATURE_ENTRY(VIRTIO_SCSI_F_INOUT, \
            "VIRTIO_SCSI_F_INOUT: Requests including read and writable data "
            "buffers supported"),
    FEATURE_ENTRY(VIRTIO_SCSI_F_HOTPLUG, \
            "VIRTIO_SCSI_F_HOTPLUG: Reporting and handling hot-plug events "
            "supported"),
    FEATURE_ENTRY(VIRTIO_SCSI_F_CHANGE, \
            "VIRTIO_SCSI_F_CHANGE: Reporting and handling LUN changes "
            "supported"),
    FEATURE_ENTRY(VIRTIO_SCSI_F_T10_PI, \
            "VIRTIO_SCSI_F_T10_PI: T10 info included in request header"),
    FEATURE_ENTRY(VHOST_F_LOG_ALL, \
            "VHOST_F_LOG_ALL: Logging write descriptors supported"),
    FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
            "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
            "negotiation supported"),
    { -1, "" }
};

/* virtio/vhost-user-fs features mapping */
qmp_virtio_feature_map_t virtio_fs_feature_map[] = {
    FEATURE_ENTRY(VHOST_F_LOG_ALL, \
            "VHOST_F_LOG_ALL: Logging write descriptors supported"),
    FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
            "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
            "negotiation supported"),
    { -1, "" }
};

/* virtio/vhost-user-i2c features mapping */
qmp_virtio_feature_map_t virtio_i2c_feature_map[] = {
    FEATURE_ENTRY(VIRTIO_I2C_F_ZERO_LENGTH_REQUEST, \
            "VIRTIO_I2C_F_ZERO_LENGTH_REQUEST: Zero length requests supported"),
    FEATURE_ENTRY(VHOST_F_LOG_ALL, \
            "VHOST_F_LOG_ALL: Logging write descriptors supported"),
    FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
            "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
            "negotiation supported"),
    { -1, "" }
};

/* virtio/vhost-vsock features mapping */
qmp_virtio_feature_map_t virtio_vsock_feature_map[] = {
    FEATURE_ENTRY(VIRTIO_VSOCK_F_SEQPACKET, \
            "VIRTIO_VSOCK_F_SEQPACKET: SOCK_SEQPACKET supported"),
    FEATURE_ENTRY(VHOST_F_LOG_ALL, \
            "VHOST_F_LOG_ALL: Logging write descriptors supported"),
    FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
            "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
            "negotiation supported"),
    { -1, "" }
};

/* virtio-balloon features mapping */
qmp_virtio_feature_map_t virtio_balloon_feature_map[] = {
    FEATURE_ENTRY(VIRTIO_BALLOON_F_MUST_TELL_HOST, \
            "VIRTIO_BALLOON_F_MUST_TELL_HOST: Tell host before reclaiming "
            "pages"),
    FEATURE_ENTRY(VIRTIO_BALLOON_F_STATS_VQ, \
            "VIRTIO_BALLOON_F_STATS_VQ: Guest memory stats VQ available"),
    FEATURE_ENTRY(VIRTIO_BALLOON_F_DEFLATE_ON_OOM, \
            "VIRTIO_BALLOON_F_DEFLATE_ON_OOM: Deflate balloon when guest OOM"),
    FEATURE_ENTRY(VIRTIO_BALLOON_F_FREE_PAGE_HINT, \
            "VIRTIO_BALLOON_F_FREE_PAGE_HINT: VQ reporting free pages enabled"),
    FEATURE_ENTRY(VIRTIO_BALLOON_F_PAGE_POISON, \
            "VIRTIO_BALLOON_F_PAGE_POISON: Guest page poisoning enabled"),
    FEATURE_ENTRY(VIRTIO_BALLOON_F_REPORTING, \
            "VIRTIO_BALLOON_F_REPORTING: Page reporting VQ enabled"),
    { -1, "" }
};

/* virtio-crypto features mapping */
qmp_virtio_feature_map_t virtio_crypto_feature_map[] = {
    FEATURE_ENTRY(VHOST_F_LOG_ALL, \
            "VHOST_F_LOG_ALL: Logging write descriptors supported"),
    { -1, "" }
};

/* virtio-iommu features mapping */
qmp_virtio_feature_map_t virtio_iommu_feature_map[] = {
    FEATURE_ENTRY(VIRTIO_IOMMU_F_INPUT_RANGE, \
            "VIRTIO_IOMMU_F_INPUT_RANGE: Range of available virtual addrs. "
            "available"),
    FEATURE_ENTRY(VIRTIO_IOMMU_F_DOMAIN_RANGE, \
            "VIRTIO_IOMMU_F_DOMAIN_RANGE: Number of supported domains "
            "available"),
    FEATURE_ENTRY(VIRTIO_IOMMU_F_MAP_UNMAP, \
            "VIRTIO_IOMMU_F_MAP_UNMAP: Map and unmap requests available"),
    FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS, \
            "VIRTIO_IOMMU_F_BYPASS: Endpoints not attached to domains are in "
            "bypass mode"),
    FEATURE_ENTRY(VIRTIO_IOMMU_F_PROBE, \
            "VIRTIO_IOMMU_F_PROBE: Probe requests available"),
    FEATURE_ENTRY(VIRTIO_IOMMU_F_MMIO, \
            "VIRTIO_IOMMU_F_MMIO: VIRTIO_IOMMU_MAP_F_MMIO flag available"),
    FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS_CONFIG, \
            "VIRTIO_IOMMU_F_BYPASS_CONFIG: Bypass field of IOMMU config "
            "available"),
    { -1, "" }
};

/* virtio-mem features mapping */
qmp_virtio_feature_map_t virtio_mem_feature_map[] = {
#ifndef CONFIG_ACPI
    FEATURE_ENTRY(VIRTIO_MEM_F_ACPI_PXM, \
            "VIRTIO_MEM_F_ACPI_PXM: node_id is an ACPI PXM and is valid"),
#endif /* !CONFIG_ACPI */
    FEATURE_ENTRY(VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, \
            "VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE: Unplugged memory cannot be "
            "accessed"),
    { -1, "" }
};

/* virtio-rng features mapping */
qmp_virtio_feature_map_t virtio_rng_feature_map[] = {
    FEATURE_ENTRY(VHOST_F_LOG_ALL, \
            "VHOST_F_LOG_ALL: Logging write descriptors supported"),
    FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
            "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
            "negotiation supported"),
    { -1, "" }
};

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingPackedDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t id;
    uint16_t flags;
} VRingPackedDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[];
} VRingUsed;

typedef struct VRingMemoryRegionCaches {
    struct rcu_head rcu;
    MemoryRegionCache desc;
    MemoryRegionCache avail;
    MemoryRegionCache used;
} VRingMemoryRegionCaches;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
    VRingMemoryRegionCaches *caches;
} VRing;

typedef struct VRingPackedDescEvent {
    uint16_t off_wrap;
    uint16_t flags;
} VRingPackedDescEvent;

struct VirtQueue
{
    VRing vring;
    VirtQueueElement *used_elems;

    /* Next head to pop */
    uint16_t last_avail_idx;
    bool last_avail_wrap_counter;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;
    bool shadow_avail_wrap_counter;

    uint16_t used_idx;
    bool used_wrap_counter;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    bool host_notifier_enabled;
    QLIST_ENTRY(VirtQueue) node;
};

const char *virtio_device_names[] = {
    [VIRTIO_ID_NET] = "virtio-net",
    [VIRTIO_ID_BLOCK] = "virtio-blk",
    [VIRTIO_ID_CONSOLE] = "virtio-serial",
    [VIRTIO_ID_RNG] = "virtio-rng",
    [VIRTIO_ID_BALLOON] = "virtio-balloon",
    [VIRTIO_ID_IOMEM] = "virtio-iomem",
    [VIRTIO_ID_RPMSG] = "virtio-rpmsg",
    [VIRTIO_ID_SCSI] = "virtio-scsi",
    [VIRTIO_ID_9P] = "virtio-9p",
    [VIRTIO_ID_MAC80211_WLAN] = "virtio-mac-wlan",
    [VIRTIO_ID_RPROC_SERIAL] = "virtio-rproc-serial",
    [VIRTIO_ID_CAIF] = "virtio-caif",
    [VIRTIO_ID_MEMORY_BALLOON] = "virtio-mem-balloon",
    [VIRTIO_ID_GPU] = "virtio-gpu",
    [VIRTIO_ID_CLOCK] = "virtio-clk",
    [VIRTIO_ID_INPUT] = "virtio-input",
    [VIRTIO_ID_VSOCK] = "vhost-vsock",
    [VIRTIO_ID_CRYPTO] = "virtio-crypto",
    [VIRTIO_ID_SIGNAL_DIST] = "virtio-signal",
    [VIRTIO_ID_PSTORE] = "virtio-pstore",
    [VIRTIO_ID_IOMMU] = "virtio-iommu",
    [VIRTIO_ID_MEM] = "virtio-mem",
    [VIRTIO_ID_SOUND] = "virtio-sound",
    [VIRTIO_ID_FS] = "virtio-user-fs",
    [VIRTIO_ID_PMEM] = "virtio-pmem",
    [VIRTIO_ID_RPMB] = "virtio-rpmb",
    [VIRTIO_ID_MAC80211_HWSIM] = "virtio-mac-hwsim",
    [VIRTIO_ID_VIDEO_ENCODER] = "virtio-vid-encoder",
    [VIRTIO_ID_VIDEO_DECODER] = "virtio-vid-decoder",
    [VIRTIO_ID_SCMI] = "virtio-scmi",
    [VIRTIO_ID_NITRO_SEC_MOD] = "virtio-nitro-sec-mod",
    [VIRTIO_ID_I2C_ADAPTER] = "vhost-user-i2c",
    [VIRTIO_ID_WATCHDOG] = "virtio-watchdog",
    [VIRTIO_ID_CAN] = "virtio-can",
    [VIRTIO_ID_DMABUF] = "virtio-dmabuf",
    [VIRTIO_ID_PARAM_SERV] = "virtio-param-serv",
    [VIRTIO_ID_AUDIO_POLICY] = "virtio-audio-pol",
    [VIRTIO_ID_BT] = "virtio-bluetooth",
    [VIRTIO_ID_GPIO] = "virtio-gpio"
};

static const char *virtio_id_to_name(uint16_t device_id)
{
    assert(device_id < G_N_ELEMENTS(virtio_device_names));
    const char *name = virtio_device_names[device_id];
    assert(name != NULL);
    return name;
}

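/*
 * E.g. virtio_id_to_name(VIRTIO_ID_NET) returns "virtio-net"; an id that
 * is out of range or has no entry in virtio_device_names[] trips one of
 * the assertions above.
 */
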
/* Called within call_rcu().  */
static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
{
    assert(caches != NULL);
    address_space_cache_destroy(&caches->desc);
    address_space_cache_destroy(&caches->avail);
    address_space_cache_destroy(&caches->used);
    g_free(caches);
}

static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches;

    caches = qatomic_read(&vq->vring.caches);
    qatomic_rcu_set(&vq->vring.caches, NULL);
    if (caches) {
        call_rcu(caches, virtio_free_region_cache, rcu);
    }
}

static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
    VirtQueue *vq = &vdev->vq[n];
    VRingMemoryRegionCaches *old = vq->vring.caches;
    VRingMemoryRegionCaches *new = NULL;
    hwaddr addr, size;
    int64_t len;
    bool packed;

    addr = vq->vring.desc;
    if (!addr) {
        goto out_no_cache;
    }
    new = g_new0(VRingMemoryRegionCaches, 1);
    size = virtio_queue_get_desc_size(vdev, n);
    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                   true : false;
    len = address_space_cache_init(&new->desc, vdev->dma_as,
                                   addr, size, packed);
    if (len < size) {
        virtio_error(vdev, "Cannot map desc");
        goto err_desc;
    }

    size = virtio_queue_get_used_size(vdev, n);
    len = address_space_cache_init(&new->used, vdev->dma_as,
                                   vq->vring.used, size, true);
    if (len < size) {
        virtio_error(vdev, "Cannot map used");
        goto err_used;
    }

    size = virtio_queue_get_avail_size(vdev, n);
    len = address_space_cache_init(&new->avail, vdev->dma_as,
                                   vq->vring.avail, size, false);
    if (len < size) {
        virtio_error(vdev, "Cannot map avail");
        goto err_avail;
    }

    qatomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }
    return;

err_avail:
    address_space_cache_destroy(&new->avail);
err_used:
    address_space_cache_destroy(&new->used);
err_desc:
    address_space_cache_destroy(&new->desc);
out_no_cache:
    g_free(new);
    virtio_virtqueue_reset_region_cache(vq);
}

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->num || !vring->desc || !vring->align) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
    virtio_init_region_cache(vdev, n);
}

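/*
 * Worked example of the split-ring layout computed above: with num = 256
 * and align = VIRTIO_PCI_VRING_ALIGN, the descriptor table takes
 * 256 * sizeof(VRingDesc) = 4096 bytes, the avail ring starts right after
 * it and occupies offsetof(VRingAvail, ring[256]) = 4 + 512 = 516 bytes,
 * and the used ring is then rounded up to the next 4096-byte boundary.
 */
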
/* Called within rcu_read_lock().  */
static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                                  MemoryRegionCache *cache, int i)
{
    address_space_read_cached(cache, i * sizeof(VRingDesc),
                              desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);

    e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
    /* Make sure flags is seen before off_wrap */
    smp_rmb();
    e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
    virtio_tswap16s(vdev, &e->flags);
}

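/*
 * Note: off_wrap packs a 15-bit ring offset and a 1-bit wrap counter,
 * i.e. off_wrap = idx | (wrap_counter << 15), as written by
 * virtio_queue_packed_set_notification() below.
 */
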
static void vring_packed_off_wrap_write(VirtIODevice *vdev,
                                        MemoryRegionCache *cache,
                                        uint16_t off_wrap)
{
    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);

    virtio_stw_phys_cached(vdev, cache, off, off_wrap);
    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
}

static void vring_packed_flags_write(VirtIODevice *vdev,
                                     MemoryRegionCache *cache, uint16_t flags)
{
    hwaddr off = offsetof(VRingPackedDescEvent, flags);

    virtio_stw_phys_cached(vdev, cache, off, flags);
    address_space_cache_invalidate(cache, off, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
{
    return qatomic_rcu_read(&vq->vring.caches);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, idx);

    if (!caches) {
        return 0;
    }

    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
    return vq->shadow_avail_idx;
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingAvail, ring[i]);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, ring[i]);

    if (!caches) {
        return;
    }

    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}

/* Called within rcu_read_lock().  */
static inline uint16_t vring_used_flags(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, flags);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static uint16_t vring_used_idx(VirtQueue *vq)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (!caches) {
        return 0;
    }

    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock().  */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    hwaddr pa = offsetof(VRingUsed, idx);

    if (caches) {
        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
    }

    vq->used_idx = val;
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa = offsetof(VRingUsed, flags);
    uint16_t flags;

    if (!caches) {
        return;
    }

    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
}

/* Called within rcu_read_lock().  */
static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    VRingMemoryRegionCaches *caches;
    hwaddr pa;
    if (!vq->notification) {
        return;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    pa = offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
}

static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
{
    RCU_READ_LOCK_GUARD();

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

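/*
 * With VIRTIO_RING_F_EVENT_IDX negotiated, the device does not toggle
 * VRING_USED_F_NO_NOTIFY; instead it publishes the avail index it wants
 * to be kicked at.  Writing vring_avail_idx(vq) above therefore means
 * "notify me as soon as the driver adds the next buffer".
 */
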
static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
{
    uint16_t off_wrap;
    VRingPackedDescEvent e;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();
    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_event_read(vq->vdev, &caches->used, &e);

    if (!enable) {
        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
        /* Make sure off_wrap is written before flags */
        smp_wmb();
        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
    } else {
        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
    }

    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

bool virtio_queue_get_notification(VirtQueue *vq)
{
    return vq->notification;
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;

    if (!vq->vring.desc) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtio_queue_packed_set_notification(vq, enable);
    } else {
        virtio_queue_split_set_notification(vq, enable);
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

static void vring_packed_desc_read_flags(VirtIODevice *vdev,
                                         uint16_t *flags,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    *flags = virtio_lduw_phys_cached(vdev, cache, off);
}

static void vring_packed_desc_read(VirtIODevice *vdev,
                                   VRingPackedDesc *desc,
                                   MemoryRegionCache *cache,
                                   int i, bool strict_order)
{
    hwaddr off = i * sizeof(VRingPackedDesc);

    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);

    if (strict_order) {
        /* Make sure flags is read before the rest of the fields. */
        smp_rmb();
    }

    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
                              &desc->addr, sizeof(desc->addr));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
                              &desc->id, sizeof(desc->id));
    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
                              &desc->len, sizeof(desc->len));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap16s(vdev, &desc->id);
    virtio_tswap32s(vdev, &desc->len);
}

static void vring_packed_desc_write_data(VirtIODevice *vdev,
                                         VRingPackedDesc *desc,
                                         MemoryRegionCache *cache,
                                         int i)
{
    hwaddr off_id = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, id);
    hwaddr off_len = i * sizeof(VRingPackedDesc) +
                    offsetof(VRingPackedDesc, len);

    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->id);
    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
}

static void vring_packed_desc_write_flags(VirtIODevice *vdev,
                                          VRingPackedDesc *desc,
                                          MemoryRegionCache *cache,
                                          int i)
{
    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);

    virtio_stw_phys_cached(vdev, cache, off, desc->flags);
    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
}

static void vring_packed_desc_write(VirtIODevice *vdev,
                                    VRingPackedDesc *desc,
                                    MemoryRegionCache *cache,
                                    int i, bool strict_order)
{
    vring_packed_desc_write_data(vdev, desc, cache, i);
    if (strict_order) {
        /* Make sure data is written before flags. */
        smp_wmb();
    }
    vring_packed_desc_write_flags(vdev, desc, cache, i);
}

static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
{
    bool avail, used;

    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
    return (avail != used) && (avail == wrap_counter);
}

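/*
 * Example: with wrap_counter = 1, a descriptor with AVAIL = 1 and
 * USED = 0 is available.  Once the device marks it used (AVAIL = 1,
 * USED = 1), avail == used and it is no longer available.  When the
 * driver wraps around the ring, the counter flips and the meaning of
 * the two bits inverts accordingly.
 */
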
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers.
 * Called within rcu_read_lock().  */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static int virtio_queue_split_empty(VirtQueue *vq)
{
    bool empty;

    if (virtio_device_disabled(vq->vdev)) {
        return 1;
    }

    if (unlikely(!vq->vring.avail)) {
        return 1;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    empty = vring_avail_idx(vq) == vq->last_avail_idx;
    return empty;
}

/* Called within rcu_read_lock().  */
static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
{
    struct VRingPackedDesc desc;
    VRingMemoryRegionCaches *cache;

    if (unlikely(!vq->vring.desc)) {
        return 1;
    }

    cache = vring_get_region_caches(vq);
    if (!cache) {
        return 1;
    }

    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
                                 vq->last_avail_idx);

    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
}

static int virtio_queue_packed_empty(VirtQueue *vq)
{
    RCU_READ_LOCK_GUARD();
    return virtio_queue_packed_empty_rcu(vq);
}

int virtio_queue_empty(VirtQueue *vq)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        return virtio_queue_packed_empty(vq);
    } else {
        return virtio_queue_split_empty(vq);
    }
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++)
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse -= elem->ndescs;
    virtqueue_unmap_sg(vq, elem, len);
}

static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
{
    vq->last_avail_idx -= num;
}

static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
{
    if (vq->last_avail_idx < num) {
        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
        vq->last_avail_wrap_counter ^= 1;
    } else {
        vq->last_avail_idx -= num;
    }
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, 1);
    } else {
        virtqueue_split_rewind(vq, 1);
    }

    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }

    vq->inuse -= num;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_rewind(vq, num);
    } else {
        virtqueue_split_rewind(vq, num);
    }
    return true;
}

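/*
 * A typical caller pops speculatively and rewinds on a temporary failure,
 * e.g. (can_process() standing in for a device-specific check):
 *
 *     elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 *     if (elem && !can_process(elem)) {
 *         virtqueue_unpop(vq, elem, 0);
 *         g_free(elem);
 *     }
 */
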
static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                 unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
                                  unsigned int len, unsigned int idx)
{
    vq->used_elems[idx].index = elem->index;
    vq->used_elems[idx].len = len;
    vq->used_elems[idx].ndescs = elem->ndescs;
}

static void virtqueue_packed_fill_desc(VirtQueue *vq,
                                       const VirtQueueElement *elem,
                                       unsigned int idx,
                                       bool strict_order)
{
    uint16_t head;
    VRingMemoryRegionCaches *caches;
    VRingPackedDesc desc = {
        .id = elem->index,
        .len = elem->len,
    };
    bool wrap_counter = vq->used_wrap_counter;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    head = vq->used_idx + idx;
    if (head >= vq->vring.num) {
        head -= vq->vring.num;
        wrap_counter ^= 1;
    }
    if (wrap_counter) {
        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
    } else {
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        return;
    }

    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
}

/* Called within rcu_read_lock().  */
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (virtio_device_disabled(vq->vdev)) {
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_fill(vq, elem, len, idx);
    } else {
        virtqueue_split_fill(vq, elem, len, idx);
    }
}

/* Called within rcu_read_lock().  */
static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(!vq->vring.used)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
        vq->signalled_used_valid = false;
}

static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
{
    unsigned int i, ndescs = 0;

    if (unlikely(!vq->vring.desc)) {
        return;
    }

    for (i = 1; i < count; i++) {
        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
        ndescs += vq->used_elems[i].ndescs;
    }
    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
    ndescs += vq->used_elems[0].ndescs;

    vq->inuse -= ndescs;
    vq->used_idx += ndescs;
    if (vq->used_idx >= vq->vring.num) {
        vq->used_idx -= vq->vring.num;
        vq->used_wrap_counter ^= 1;
        vq->signalled_used_valid = false;
    }
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    if (virtio_device_disabled(vq->vdev)) {
        vq->inuse -= count;
        return;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_flush(vq, count);
    } else {
        virtqueue_split_flush(vq, count);
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    RCU_READ_LOCK_GUARD();
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

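/*
 * virtqueue_push() covers the common single-element case.  A device that
 * completes several requests at once can batch instead, under the RCU
 * read lock:
 *
 *     virtqueue_fill(vq, elem0, len0, 0);
 *     virtqueue_fill(vq, elem1, len1, 1);
 *     virtqueue_flush(vq, 2);
 *
 * which publishes both used entries with a single index update.
 */
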
/* Called within rcu_read_lock().  */
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

/* Called within rcu_read_lock().  */
static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                          MemoryRegionCache *desc_cache,
                                          unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_split_desc_read(vdev, desc, desc_cache, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock().  */
static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
                            unsigned int *in_bytes, unsigned int *out_bytes,
                            unsigned max_in_bytes, unsigned max_out_bytes,
                            VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    int rc;

    idx = vq->last_avail_idx;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;

    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        MemoryRegionCache *desc_cache = &caches->desc;
        unsigned int num_bufs;
        VRingDesc desc;
        unsigned int i;

        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        vring_split_desc_read(vdev, &desc, desc_cache, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingDesc);
            num_bufs = i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
        } else {
            total_bufs = num_bufs;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

static int virtqueue_packed_read_next_desc(VirtQueue *vq,
                                           VRingPackedDesc *desc,
                                           MemoryRegionCache *desc_cache,
                                           unsigned int max,
                                           unsigned int *next,
                                           bool indirect)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    ++*next;
    if (*next == max) {
        if (indirect) {
            return VIRTQUEUE_READ_DESC_DONE;
        } else {
            (*next) -= vq->vring.num;
        }
    }

    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
    return VIRTQUEUE_READ_DESC_MORE;
}

/* Called within rcu_read_lock().  */
static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
                                             unsigned int *in_bytes,
                                             unsigned int *out_bytes,
                                             unsigned max_in_bytes,
                                             unsigned max_out_bytes,
                                             VRingMemoryRegionCaches *caches)
{
    VirtIODevice *vdev = vq->vdev;
    unsigned int max, idx;
    unsigned int total_bufs, in_total, out_total;
    MemoryRegionCache *desc_cache;
    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
    int64_t len = 0;
    VRingPackedDesc desc;
    bool wrap_counter;

    idx = vq->last_avail_idx;
    wrap_counter = vq->last_avail_wrap_counter;
    total_bufs = in_total = out_total = 0;

    max = vq->vring.num;

    for (;;) {
        unsigned int num_bufs = total_bufs;
        unsigned int i = idx;
        int rc;

        desc_cache = &caches->desc;
        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
        if (!is_desc_avail(desc.flags, wrap_counter)) {
            break;
        }

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingPackedDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            len = address_space_cache_init(&indirect_desc_cache,
                                           vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                virtio_error(vdev, "Cannot map indirect buffer");
                goto err;
            }

            max = desc.len / sizeof(VRingPackedDesc);
            num_bufs = i = 0;
            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
                                                 &i, desc_cache ==
                                                 &indirect_desc_cache);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (desc_cache == &indirect_desc_cache) {
            address_space_cache_destroy(&indirect_desc_cache);
            total_bufs++;
            idx++;
        } else {
            idx += num_bufs - total_bufs;
            total_bufs = num_bufs;
        }

        if (idx >= vq->vring.num) {
            idx -= vq->vring.num;
            wrap_counter ^= 1;
        }
    }

    /* Record the index and wrap counter for a kick we want */
    vq->shadow_avail_idx = idx;
    vq->shadow_avail_wrap_counter = wrap_counter;
done:
    address_space_cache_destroy(&indirect_desc_cache);
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    uint16_t desc_size;
    VRingMemoryRegionCaches *caches;

    RCU_READ_LOCK_GUARD();

    if (unlikely(!vq->vring.desc)) {
        goto err;
    }

    caches = vring_get_region_caches(vq);
    if (!caches) {
        goto err;
    }

    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
                                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
    if (caches->desc.len < vq->vring.num * desc_size) {
        virtio_error(vq->vdev, "Cannot map descriptor ring");
        goto err;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
                                         max_in_bytes, max_out_bytes,
                                         caches);
    } else {
        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
                                        max_in_bytes, max_out_bytes,
                                        caches);
    }

    return;
err:
    if (in_bytes) {
        *in_bytes = 0;
    }
    if (out_bytes) {
        *out_bytes = 0;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

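/*
 * E.g. a device that needs a 12-byte header plus 1514 bytes of
 * device-writable receive space can test for it up front with
 * virtqueue_avail_bytes(vq, 1526, 0) before popping anything.
 */
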
static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE,
                                              MEMTXATTRS_UNSPECIFIED);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int num_sg,
                                bool is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE,
                                        MEMTXATTRS_UNSPECIFIED);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
                        false);
}

1874 static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
1876 VirtQueueElement *elem;
1877 size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
1878 size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
1879 size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
1880 size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
1881 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
1882 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
1884 assert(sz >= sizeof(VirtQueueElement));
1885 elem = g_malloc(out_sg_end);
1886 trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
1887 elem->out_num = out_num;
1888 elem->in_num = in_num;
1889 elem->in_addr = (void *)elem + in_addr_ofs;
1890 elem->out_addr = (void *)elem + out_addr_ofs;
1891 elem->in_sg = (void *)elem + in_sg_ofs;
1892 elem->out_sg = (void *)elem + out_sg_ofs;
1893 return elem;
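/*
 * Pop one element from a split virtqueue: fetch the head index from the
 * avail ring, walk the (possibly indirect) descriptor chain, map every
 * buffer, and return a freshly allocated VirtQueueElement.  Returns
 * NULL when the queue is empty; on malformed rings it also marks the
 * device broken through virtio_error() and returns NULL.
 */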
1896 static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
1898 unsigned int i, head, max;
1899 VRingMemoryRegionCaches *caches;
1900 MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1901 MemoryRegionCache *desc_cache;
1902 int64_t len;
1903 VirtIODevice *vdev = vq->vdev;
1904 VirtQueueElement *elem = NULL;
1905 unsigned out_num, in_num, elem_entries;
1906 hwaddr addr[VIRTQUEUE_MAX_SIZE];
1907 struct iovec iov[VIRTQUEUE_MAX_SIZE];
1908 VRingDesc desc;
1909 int rc;
1911 RCU_READ_LOCK_GUARD();
1912 if (virtio_queue_empty_rcu(vq)) {
1913 goto done;
1915 /* Needed after virtio_queue_empty(), see comment in
1916 * virtqueue_num_heads(). */
1917 smp_rmb();
1919 /* When we start there are neither input nor output buffers. */
1920 out_num = in_num = elem_entries = 0;
1922 max = vq->vring.num;
1924 if (vq->inuse >= vq->vring.num) {
1925 virtio_error(vdev, "Virtqueue size exceeded");
1926 goto done;
1929 if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
1930 goto done;
1933 if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1934 vring_set_avail_event(vq, vq->last_avail_idx);
1937 i = head;
1939 caches = vring_get_region_caches(vq);
1940 if (!caches) {
1941 virtio_error(vdev, "Region caches not initialized");
1942 goto done;
1945 if (caches->desc.len < max * sizeof(VRingDesc)) {
1946 virtio_error(vdev, "Cannot map descriptor ring");
1947 goto done;
1950 desc_cache = &caches->desc;
1951 vring_split_desc_read(vdev, &desc, desc_cache, i);
1952 if (desc.flags & VRING_DESC_F_INDIRECT) {
1953 if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1954 virtio_error(vdev, "Invalid size for indirect buffer table");
1955 goto done;
1958 /* loop over the indirect descriptor table */
1959 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1960 desc.addr, desc.len, false);
1961 desc_cache = &indirect_desc_cache;
1962 if (len < desc.len) {
1963 virtio_error(vdev, "Cannot map indirect buffer");
1964 goto done;
1967 max = desc.len / sizeof(VRingDesc);
1968 i = 0;
1969 vring_split_desc_read(vdev, &desc, desc_cache, i);
1972 /* Collect all the descriptors */
1973 do {
1974 bool map_ok;
1976 if (desc.flags & VRING_DESC_F_WRITE) {
1977 map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1978 iov + out_num,
1979 VIRTQUEUE_MAX_SIZE - out_num, true,
1980 desc.addr, desc.len);
1981 } else {
1982 if (in_num) {
1983 virtio_error(vdev, "Incorrect order for descriptors");
1984 goto err_undo_map;
1986 map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1987 VIRTQUEUE_MAX_SIZE, false,
1988 desc.addr, desc.len);
1990 if (!map_ok) {
1991 goto err_undo_map;
1994 /* If we've got too many, that implies a descriptor loop. */
1995 if (++elem_entries > max) {
1996 virtio_error(vdev, "Looped descriptor");
1997 goto err_undo_map;
2000 rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
2001 } while (rc == VIRTQUEUE_READ_DESC_MORE);
2003 if (rc == VIRTQUEUE_READ_DESC_ERROR) {
2004 goto err_undo_map;
2007 /* Now copy what we have collected and mapped */
2008 elem = virtqueue_alloc_element(sz, out_num, in_num);
2009 elem->index = head;
2010 elem->ndescs = 1;
2011 for (i = 0; i < out_num; i++) {
2012 elem->out_addr[i] = addr[i];
2013 elem->out_sg[i] = iov[i];
2015 for (i = 0; i < in_num; i++) {
2016 elem->in_addr[i] = addr[out_num + i];
2017 elem->in_sg[i] = iov[out_num + i];
2020 vq->inuse++;
2022 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
2023 done:
2024 address_space_cache_destroy(&indirect_desc_cache);
2026 return elem;
2028 err_undo_map:
2029 virtqueue_undo_map_desc(out_num, in_num, iov);
2030 goto done;
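/*
 * Packed-ring variant of pop.  There is no separate avail ring here:
 * availability is encoded in each descriptor's AVAIL/USED flag bits,
 * interpreted against vq->last_avail_wrap_counter, and completions are
 * identified by buffer id (desc.id) rather than by head index.
 */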
2033 static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
2035 unsigned int i, max;
2036 VRingMemoryRegionCaches *caches;
2037 MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
2038 MemoryRegionCache *desc_cache;
2039 int64_t len;
2040 VirtIODevice *vdev = vq->vdev;
2041 VirtQueueElement *elem = NULL;
2042 unsigned out_num, in_num, elem_entries;
2043 hwaddr addr[VIRTQUEUE_MAX_SIZE];
2044 struct iovec iov[VIRTQUEUE_MAX_SIZE];
2045 VRingPackedDesc desc;
2046 uint16_t id;
2047 int rc;
2049 RCU_READ_LOCK_GUARD();
2050 if (virtio_queue_packed_empty_rcu(vq)) {
2051 goto done;
2054 /* When we start there are neither input nor output buffers. */
2055 out_num = in_num = elem_entries = 0;
2057 max = vq->vring.num;
2059 if (vq->inuse >= vq->vring.num) {
2060 virtio_error(vdev, "Virtqueue size exceeded");
2061 goto done;
2064 i = vq->last_avail_idx;
2066 caches = vring_get_region_caches(vq);
2067 if (!caches) {
2068 virtio_error(vdev, "Region caches not initialized");
2069 goto done;
2072 if (caches->desc.len < max * sizeof(VRingPackedDesc)) {
2073 virtio_error(vdev, "Cannot map descriptor ring");
2074 goto done;
2077 desc_cache = &caches->desc;
2078 vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
2079 id = desc.id;
2080 if (desc.flags & VRING_DESC_F_INDIRECT) {
2081 if (desc.len % sizeof(VRingPackedDesc)) {
2082 virtio_error(vdev, "Invalid size for indirect buffer table");
2083 goto done;
2086 /* loop over the indirect descriptor table */
2087 len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
2088 desc.addr, desc.len, false);
2089 desc_cache = &indirect_desc_cache;
2090 if (len < desc.len) {
2091 virtio_error(vdev, "Cannot map indirect buffer");
2092 goto done;
2095 max = desc.len / sizeof(VRingPackedDesc);
2096 i = 0;
2097 vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
2100 /* Collect all the descriptors */
2101 do {
2102 bool map_ok;
2104 if (desc.flags & VRING_DESC_F_WRITE) {
2105 map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
2106 iov + out_num,
2107 VIRTQUEUE_MAX_SIZE - out_num, true,
2108 desc.addr, desc.len);
2109 } else {
2110 if (in_num) {
2111 virtio_error(vdev, "Incorrect order for descriptors");
2112 goto err_undo_map;
2114 map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
2115 VIRTQUEUE_MAX_SIZE, false,
2116 desc.addr, desc.len);
2118 if (!map_ok) {
2119 goto err_undo_map;
2122 /* If we've got too many, that implies a descriptor loop. */
2123 if (++elem_entries > max) {
2124 virtio_error(vdev, "Looped descriptor");
2125 goto err_undo_map;
2128 rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
2129 desc_cache ==
2130 &indirect_desc_cache);
2131 } while (rc == VIRTQUEUE_READ_DESC_MORE);
2133 /* Now copy what we have collected and mapped */
2134 elem = virtqueue_alloc_element(sz, out_num, in_num);
2135 for (i = 0; i < out_num; i++) {
2136 elem->out_addr[i] = addr[i];
2137 elem->out_sg[i] = iov[i];
2139 for (i = 0; i < in_num; i++) {
2140 elem->in_addr[i] = addr[out_num + i];
2141 elem->in_sg[i] = iov[out_num + i];
2144 elem->index = id;
2145 elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
2146 vq->last_avail_idx += elem->ndescs;
2147 vq->inuse += elem->ndescs;
2149 if (vq->last_avail_idx >= vq->vring.num) {
2150 vq->last_avail_idx -= vq->vring.num;
2151 vq->last_avail_wrap_counter ^= 1;
2154 vq->shadow_avail_idx = vq->last_avail_idx;
2155 vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
2157 trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
2158 done:
2159 address_space_cache_destroy(&indirect_desc_cache);
2161 return elem;
2163 err_undo_map:
2164 virtqueue_undo_map_desc(out_num, in_num, iov);
2165 goto done;
2168 void *virtqueue_pop(VirtQueue *vq, size_t sz)
2170 if (virtio_device_disabled(vq->vdev)) {
2171 return NULL;
2174 if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
2175 return virtqueue_packed_pop(vq, sz);
2176 } else {
2177 return virtqueue_split_pop(vq, sz);
2181 static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
2183 VRingMemoryRegionCaches *caches;
2184 MemoryRegionCache *desc_cache;
2185 unsigned int dropped = 0;
2186 VirtQueueElement elem = {};
2187 VirtIODevice *vdev = vq->vdev;
2188 VRingPackedDesc desc;
2190 RCU_READ_LOCK_GUARD();
2192 caches = vring_get_region_caches(vq);
2193 if (!caches) {
2194 return 0;
2197 desc_cache = &caches->desc;
2199 virtio_queue_set_notification(vq, 0);
2201 while (vq->inuse < vq->vring.num) {
2202 unsigned int idx = vq->last_avail_idx;
2204 * works similarly to virtqueue_pop but does not map buffers
2205 * and does not allocate any memory.
2207 vring_packed_desc_read(vdev, &desc, desc_cache,
2208 vq->last_avail_idx, true);
2209 if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
2210 break;
2212 elem.index = desc.id;
2213 elem.ndescs = 1;
2214 while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
2215 vq->vring.num, &idx, false)) {
2216 ++elem.ndescs;
2219 * immediately push the element, nothing to unmap
2220 * as both in_num and out_num are set to 0.
2222 virtqueue_push(vq, &elem, 0);
2223 dropped++;
2224 vq->last_avail_idx += elem.ndescs;
2225 if (vq->last_avail_idx >= vq->vring.num) {
2226 vq->last_avail_idx -= vq->vring.num;
2227 vq->last_avail_wrap_counter ^= 1;
2231 return dropped;
2234 static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
2236 unsigned int dropped = 0;
2237 VirtQueueElement elem = {};
2238 VirtIODevice *vdev = vq->vdev;
2239 bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2241 while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
2242 /* works similarly to virtqueue_pop but does not map buffers
2243 * and does not allocate any memory */
2244 smp_rmb();
2245 if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
2246 break;
2248 vq->inuse++;
2249 vq->last_avail_idx++;
2250 if (fEventIdx) {
2251 vring_set_avail_event(vq, vq->last_avail_idx);
2253 /* immediately push the element, nothing to unmap
2254 * as both in_num and out_num are set to 0 */
2255 virtqueue_push(vq, &elem, 0);
2256 dropped++;
2259 return dropped;
2262 /* virtqueue_drop_all:
2263 * @vq: The #VirtQueue
2264 * Drops all queued buffers and indicates them to the guest
2265 * as if they are done. Useful when buffers cannot be
2266 * processed but must be returned to the guest.
2268 unsigned int virtqueue_drop_all(VirtQueue *vq)
2270 struct VirtIODevice *vdev = vq->vdev;
2272 if (virtio_device_disabled(vq->vdev)) {
2273 return 0;
2276 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2277 return virtqueue_packed_drop_all(vq);
2278 } else {
2279 return virtqueue_split_drop_all(vq);
2283 /* Reading and writing a structure directly to QEMUFile is *awful*, but
2284 * it is what QEMU has always done by mistake. We can change it sooner
2285 * or later by bumping the version number of the affected vm states.
2286 * In the meanwhile, since the in-memory layout of VirtQueueElement
2287 * has changed, we need to marshal to and from the layout that was
2288 * used before the change.
2290 typedef struct VirtQueueElementOld {
2291 unsigned int index;
2292 unsigned int out_num;
2293 unsigned int in_num;
2294 hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
2295 hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
2296 struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
2297 struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
2298 } VirtQueueElementOld;
2300 void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
2302 VirtQueueElement *elem;
2303 VirtQueueElementOld data;
2304 int i;
2306 qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
2308 /* TODO: teach all callers that this can fail, and return failure instead
2309 * of asserting here.
2310 * This is just one thing (there are probably more) that must be
2311 * fixed before we can allow NDEBUG compilation.
2313 assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
2314 assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
2316 elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
2317 elem->index = data.index;
2319 for (i = 0; i < elem->in_num; i++) {
2320 elem->in_addr[i] = data.in_addr[i];
2323 for (i = 0; i < elem->out_num; i++) {
2324 elem->out_addr[i] = data.out_addr[i];
2327 for (i = 0; i < elem->in_num; i++) {
2328 /* Base is overwritten by virtqueue_map. */
2329 elem->in_sg[i].iov_base = 0;
2330 elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
2333 for (i = 0; i < elem->out_num; i++) {
2334 /* Base is overwritten by virtqueue_map. */
2335 elem->out_sg[i].iov_base = 0;
2336 elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
2339 if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2340 qemu_get_be32s(f, &elem->ndescs);
2343 virtqueue_map(vdev, elem);
2344 return elem;
2347 void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
2348 VirtQueueElement *elem)
2350 VirtQueueElementOld data;
2351 int i;
2353 memset(&data, 0, sizeof(data));
2354 data.index = elem->index;
2355 data.in_num = elem->in_num;
2356 data.out_num = elem->out_num;
2358 for (i = 0; i < elem->in_num; i++) {
2359 data.in_addr[i] = elem->in_addr[i];
2362 for (i = 0; i < elem->out_num; i++) {
2363 data.out_addr[i] = elem->out_addr[i];
2366 for (i = 0; i < elem->in_num; i++) {
2367 /* Base is overwritten by virtqueue_map when loading. Do not
2368 * save it, as it would leak the QEMU address space layout. */
2369 data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
2372 for (i = 0; i < elem->out_num; i++) {
2373 /* Do not save iov_base as above. */
2374 data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
2377 if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2378 qemu_put_be32s(f, &elem->ndescs);
2381 qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
2384 /* virtio device */
2385 static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
2387 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2388 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2390 if (virtio_device_disabled(vdev)) {
2391 return;
2394 if (k->notify) {
2395 k->notify(qbus->parent, vector);
2399 void virtio_update_irq(VirtIODevice *vdev)
2401 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
2404 static int virtio_validate_features(VirtIODevice *vdev)
2406 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2408 if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
2409 !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
2410 return -EFAULT;
2413 if (k->validate_features) {
2414 return k->validate_features(vdev);
2415 } else {
2416 return 0;
2420 int virtio_set_status(VirtIODevice *vdev, uint8_t val)
2422 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2423 trace_virtio_set_status(vdev, val);
2425 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2426 if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
2427 val & VIRTIO_CONFIG_S_FEATURES_OK) {
2428 int ret = virtio_validate_features(vdev);
2430 if (ret) {
2431 return ret;
2436 if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
2437 (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
2438 virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
2441 if (k->set_status) {
2442 k->set_status(vdev, val);
2444 vdev->status = val;
2446 return 0;
2449 static enum virtio_device_endian virtio_default_endian(void)
2451 if (target_words_bigendian()) {
2452 return VIRTIO_DEVICE_ENDIAN_BIG;
2453 } else {
2454 return VIRTIO_DEVICE_ENDIAN_LITTLE;
2458 static enum virtio_device_endian virtio_current_cpu_endian(void)
2460 if (cpu_virtio_is_big_endian(current_cpu)) {
2461 return VIRTIO_DEVICE_ENDIAN_BIG;
2462 } else {
2463 return VIRTIO_DEVICE_ENDIAN_LITTLE;
2467 void virtio_reset(void *opaque)
2469 VirtIODevice *vdev = opaque;
2470 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2471 int i;
2473 virtio_set_status(vdev, 0);
2474 if (current_cpu) {
2475 /* Guest initiated reset */
2476 vdev->device_endian = virtio_current_cpu_endian();
2477 } else {
2478 /* System reset */
2479 vdev->device_endian = virtio_default_endian();
2482 if (k->reset) {
2483 k->reset(vdev);
2486 vdev->start_on_kick = false;
2487 vdev->started = false;
2488 vdev->broken = false;
2489 vdev->guest_features = 0;
2490 vdev->queue_sel = 0;
2491 vdev->status = 0;
2492 vdev->disabled = false;
2493 qatomic_set(&vdev->isr, 0);
2494 vdev->config_vector = VIRTIO_NO_VECTOR;
2495 virtio_notify_vector(vdev, vdev->config_vector);
2497 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2498 vdev->vq[i].vring.desc = 0;
2499 vdev->vq[i].vring.avail = 0;
2500 vdev->vq[i].vring.used = 0;
2501 vdev->vq[i].last_avail_idx = 0;
2502 vdev->vq[i].shadow_avail_idx = 0;
2503 vdev->vq[i].used_idx = 0;
2504 vdev->vq[i].last_avail_wrap_counter = true;
2505 vdev->vq[i].shadow_avail_wrap_counter = true;
2506 vdev->vq[i].used_wrap_counter = true;
2507 virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
2508 vdev->vq[i].signalled_used = 0;
2509 vdev->vq[i].signalled_used_valid = false;
2510 vdev->vq[i].notification = true;
2511 vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2512 vdev->vq[i].inuse = 0;
2513 virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2517 uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
2519 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2520 uint8_t val;
2522 if (addr + sizeof(val) > vdev->config_len) {
2523 return (uint32_t)-1;
2526 k->get_config(vdev, vdev->config);
2528 val = ldub_p(vdev->config + addr);
2529 return val;
2532 uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
2534 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2535 uint16_t val;
2537 if (addr + sizeof(val) > vdev->config_len) {
2538 return (uint32_t)-1;
2541 k->get_config(vdev, vdev->config);
2543 val = lduw_p(vdev->config + addr);
2544 return val;
2547 uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
2549 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2550 uint32_t val;
2552 if (addr + sizeof(val) > vdev->config_len) {
2553 return (uint32_t)-1;
2556 k->get_config(vdev, vdev->config);
2558 val = ldl_p(vdev->config + addr);
2559 return val;
2562 void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2564 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2565 uint8_t val = data;
2567 if (addr + sizeof(val) > vdev->config_len) {
2568 return;
2571 stb_p(vdev->config + addr, val);
2573 if (k->set_config) {
2574 k->set_config(vdev, vdev->config);
2578 void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2580 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2581 uint16_t val = data;
2583 if (addr + sizeof(val) > vdev->config_len) {
2584 return;
2587 stw_p(vdev->config + addr, val);
2589 if (k->set_config) {
2590 k->set_config(vdev, vdev->config);
2594 void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2596 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2597 uint32_t val = data;
2599 if (addr + sizeof(val) > vdev->config_len) {
2600 return;
2603 stl_p(vdev->config + addr, val);
2605 if (k->set_config) {
2606 k->set_config(vdev, vdev->config);
2610 uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
2612 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2613 uint8_t val;
2615 if (addr + sizeof(val) > vdev->config_len) {
2616 return (uint32_t)-1;
2619 k->get_config(vdev, vdev->config);
2621 val = ldub_p(vdev->config + addr);
2622 return val;
2625 uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
2627 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2628 uint16_t val;
2630 if (addr + sizeof(val) > vdev->config_len) {
2631 return (uint32_t)-1;
2634 k->get_config(vdev, vdev->config);
2636 val = lduw_le_p(vdev->config + addr);
2637 return val;
2640 uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
2642 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2643 uint32_t val;
2645 if (addr + sizeof(val) > vdev->config_len) {
2646 return (uint32_t)-1;
2649 k->get_config(vdev, vdev->config);
2651 val = ldl_le_p(vdev->config + addr);
2652 return val;
2655 void virtio_config_modern_writeb(VirtIODevice *vdev,
2656 uint32_t addr, uint32_t data)
2658 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2659 uint8_t val = data;
2661 if (addr + sizeof(val) > vdev->config_len) {
2662 return;
2665 stb_p(vdev->config + addr, val);
2667 if (k->set_config) {
2668 k->set_config(vdev, vdev->config);
2672 void virtio_config_modern_writew(VirtIODevice *vdev,
2673 uint32_t addr, uint32_t data)
2675 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2676 uint16_t val = data;
2678 if (addr + sizeof(val) > vdev->config_len) {
2679 return;
2682 stw_le_p(vdev->config + addr, val);
2684 if (k->set_config) {
2685 k->set_config(vdev, vdev->config);
2689 void virtio_config_modern_writel(VirtIODevice *vdev,
2690 uint32_t addr, uint32_t data)
2692 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2693 uint32_t val = data;
2695 if (addr + sizeof(val) > vdev->config_len) {
2696 return;
2699 stl_le_p(vdev->config + addr, val);
2701 if (k->set_config) {
2702 k->set_config(vdev, vdev->config);
2706 void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
2708 if (!vdev->vq[n].vring.num) {
2709 return;
2711 vdev->vq[n].vring.desc = addr;
2712 virtio_queue_update_rings(vdev, n);
2715 hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
2717 return vdev->vq[n].vring.desc;
2720 void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
2721 hwaddr avail, hwaddr used)
2723 if (!vdev->vq[n].vring.num) {
2724 return;
2726 vdev->vq[n].vring.desc = desc;
2727 vdev->vq[n].vring.avail = avail;
2728 vdev->vq[n].vring.used = used;
2729 virtio_init_region_cache(vdev, n);
2732 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
2734 /* Don't allow guest to flip queue between existent and
2735 * nonexistent states, or to set it to an invalid size.
2737 if (!!num != !!vdev->vq[n].vring.num ||
2738 num > VIRTQUEUE_MAX_SIZE ||
2739 num < 0) {
2740 return;
2742 vdev->vq[n].vring.num = num;
2745 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
2747 return QLIST_FIRST(&vdev->vector_queues[vector]);
2750 VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
2752 return QLIST_NEXT(vq, node);
2755 int virtio_queue_get_num(VirtIODevice *vdev, int n)
2757 return vdev->vq[n].vring.num;
2760 int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
2762 return vdev->vq[n].vring.num_default;
2765 int virtio_get_num_queues(VirtIODevice *vdev)
2767 int i;
2769 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2770 if (!virtio_queue_get_num(vdev, i)) {
2771 break;
2775 return i;
2778 void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
2780 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2781 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2783 /* virtio-1 compliant devices cannot change the alignment */
2784 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2785 error_report("tried to modify queue alignment for virtio-1 device");
2786 return;
2788 /* Check that the transport told us it was going to do this
2789 * (so a buggy transport will immediately assert rather than
2790 * silently failing to migrate this state)
2792 assert(k->has_variable_vring_alignment);
2794 if (align) {
2795 vdev->vq[n].vring.align = align;
2796 virtio_queue_update_rings(vdev, n);
2800 static void virtio_queue_notify_vq(VirtQueue *vq)
2802 if (vq->vring.desc && vq->handle_output) {
2803 VirtIODevice *vdev = vq->vdev;
2805 if (unlikely(vdev->broken)) {
2806 return;
2809 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2810 vq->handle_output(vdev, vq);
2812 if (unlikely(vdev->start_on_kick)) {
2813 virtio_set_started(vdev, true);
2818 void virtio_queue_notify(VirtIODevice *vdev, int n)
2820 VirtQueue *vq = &vdev->vq[n];
2822 if (unlikely(!vq->vring.desc || vdev->broken)) {
2823 return;
2826 trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2827 if (vq->host_notifier_enabled) {
2828 event_notifier_set(&vq->host_notifier);
2829 } else if (vq->handle_output) {
2830 vq->handle_output(vdev, vq);
2832 if (unlikely(vdev->start_on_kick)) {
2833 virtio_set_started(vdev, true);
2838 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
2840 return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
2841 VIRTIO_NO_VECTOR;
2844 void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
2846 VirtQueue *vq = &vdev->vq[n];
2848 if (n < VIRTIO_QUEUE_MAX) {
2849 if (vdev->vector_queues &&
2850 vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2851 QLIST_REMOVE(vq, node);
2853 vdev->vq[n].vector = vector;
2854 if (vdev->vector_queues &&
2855 vector != VIRTIO_NO_VECTOR) {
2856 QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2861 VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
2862 VirtIOHandleOutput handle_output)
2864 int i;
2866 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2867 if (vdev->vq[i].vring.num == 0)
2868 break;
2871 if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
2872 abort();
2874 vdev->vq[i].vring.num = queue_size;
2875 vdev->vq[i].vring.num_default = queue_size;
2876 vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
2877 vdev->vq[i].handle_output = handle_output;
2878 vdev->vq[i].used_elems = g_new0(VirtQueueElement, queue_size);
2880 return &vdev->vq[i];
2883 void virtio_delete_queue(VirtQueue *vq)
2885 vq->vring.num = 0;
2886 vq->vring.num_default = 0;
2887 vq->handle_output = NULL;
2888 g_free(vq->used_elems);
2889 vq->used_elems = NULL;
2890 virtio_virtqueue_reset_region_cache(vq);
2893 void virtio_del_queue(VirtIODevice *vdev, int n)
2895 if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
2896 abort();
2899 virtio_delete_queue(&vdev->vq[n]);
2902 static void virtio_set_isr(VirtIODevice *vdev, int value)
2904 uint8_t old = qatomic_read(&vdev->isr);
2906 /* Do not write ISR if it does not change, so that its cacheline remains
2907 * shared in the common case where the guest does not read it.
2909 if ((old & value) != value) {
2910 qatomic_or(&vdev->isr, value);
2914 /* Called within rcu_read_lock(). */
2915 static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2917 uint16_t old, new;
2918 bool v;
2919 /* We need to expose used array entries before checking used event. */
2920 smp_mb();
2921 /* Always notify when the queue is empty, if the guest acknowledged the feature */
2922 if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2923 !vq->inuse && virtio_queue_empty(vq)) {
2924 return true;
2927 if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2928 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2931 v = vq->signalled_used_valid;
2932 vq->signalled_used_valid = true;
2933 old = vq->signalled_used;
2934 new = vq->signalled_used = vq->used_idx;
2935 return !v || vring_need_event(vring_get_used_event(vq), new, old);
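/*
 * With EVENT_IDX negotiated, vring_need_event() implements the check
 * from the virtio spec: notify only if the new used index has moved
 * past the event index published by the guest, using wrap-safe 16-bit
 * arithmetic:
 *
 *     (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
 */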
2938 static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
2939 uint16_t off_wrap, uint16_t new,
2940 uint16_t old)
2942 int off = off_wrap & ~(1 << 15);
2944 if (wrap != off_wrap >> 15) {
2945 off -= vq->vring.num;
2948 return vring_need_event(off, new, old);
2951 /* Called within rcu_read_lock(). */
2952 static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2954 VRingPackedDescEvent e;
2955 uint16_t old, new;
2956 bool v;
2957 VRingMemoryRegionCaches *caches;
2959 caches = vring_get_region_caches(vq);
2960 if (!caches) {
2961 return false;
2964 vring_packed_event_read(vdev, &caches->avail, &e);
2966 old = vq->signalled_used;
2967 new = vq->signalled_used = vq->used_idx;
2968 v = vq->signalled_used_valid;
2969 vq->signalled_used_valid = true;
2971 if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
2972 return false;
2973 } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
2974 return true;
2977 return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2978 e.off_wrap, new, old);
2981 /* Called within rcu_read_lock(). */
2982 static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2984 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2985 return virtio_packed_should_notify(vdev, vq);
2986 } else {
2987 return virtio_split_should_notify(vdev, vq);
2991 void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
2993 WITH_RCU_READ_LOCK_GUARD() {
2994 if (!virtio_should_notify(vdev, vq)) {
2995 return;
2999 trace_virtio_notify_irqfd(vdev, vq);
3002 * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
3003 * windows drivers included in virtio-win 1.8.0 (circa 2015) are
3004 * incorrectly polling this bit during crashdump and hibernation
3005 * in MSI mode, causing a hang if this bit is never updated.
3006 * Recent releases of Windows do not really shut down, but rather
3007 * log out and hibernate to make the next startup faster. Hence,
3008 * this manifested as a more serious hang during shutdown with MSI enabled.
3010 * Next driver release from 2016 fixed this problem, so working around it
3011 * is not a must, but it's easy to do, so let's do it here.
3013 * Note: it's safe to update ISR from any thread as it was switched
3014 * to an atomic operation.
3016 virtio_set_isr(vq->vdev, 0x1);
3017 event_notifier_set(&vq->guest_notifier);
3020 static void virtio_irq(VirtQueue *vq)
3022 virtio_set_isr(vq->vdev, 0x1);
3023 virtio_notify_vector(vq->vdev, vq->vector);
3026 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
3028 WITH_RCU_READ_LOCK_GUARD() {
3029 if (!virtio_should_notify(vdev, vq)) {
3030 return;
3034 trace_virtio_notify(vdev, vq);
3035 virtio_irq(vq);
3038 void virtio_notify_config(VirtIODevice *vdev)
3040 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
3041 return;
3043 virtio_set_isr(vdev, 0x3);
3044 vdev->generation++;
3045 virtio_notify_vector(vdev, vdev->config_vector);
3048 static bool virtio_device_endian_needed(void *opaque)
3050 VirtIODevice *vdev = opaque;
3052 assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
3053 if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3054 return vdev->device_endian != virtio_default_endian();
3056 /* Devices conforming to VIRTIO 1.0 or later are always LE. */
3057 return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
3060 static bool virtio_64bit_features_needed(void *opaque)
3062 VirtIODevice *vdev = opaque;
3064 return (vdev->host_features >> 32) != 0;
3067 static bool virtio_virtqueue_needed(void *opaque)
3069 VirtIODevice *vdev = opaque;
3071 return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
3074 static bool virtio_packed_virtqueue_needed(void *opaque)
3076 VirtIODevice *vdev = opaque;
3078 return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
3081 static bool virtio_ringsize_needed(void *opaque)
3083 VirtIODevice *vdev = opaque;
3084 int i;
3086 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3087 if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
3088 return true;
3091 return false;
3094 static bool virtio_extra_state_needed(void *opaque)
3096 VirtIODevice *vdev = opaque;
3097 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3098 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3100 return k->has_extra_state &&
3101 k->has_extra_state(qbus->parent);
3104 static bool virtio_broken_needed(void *opaque)
3106 VirtIODevice *vdev = opaque;
3108 return vdev->broken;
3111 static bool virtio_started_needed(void *opaque)
3113 VirtIODevice *vdev = opaque;
3115 return vdev->started;
3118 static bool virtio_disabled_needed(void *opaque)
3120 VirtIODevice *vdev = opaque;
3122 return vdev->disabled;
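/*
 * Each optional piece of state above travels as a vmstate subsection
 * guarded by its .needed callback: the subsection is only emitted when
 * the predicate returns true, so streams produced by older QEMU
 * versions, which never emit it, still load cleanly.
 */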
3125 static const VMStateDescription vmstate_virtqueue = {
3126 .name = "virtqueue_state",
3127 .version_id = 1,
3128 .minimum_version_id = 1,
3129 .fields = (VMStateField[]) {
3130 VMSTATE_UINT64(vring.avail, struct VirtQueue),
3131 VMSTATE_UINT64(vring.used, struct VirtQueue),
3132 VMSTATE_END_OF_LIST()
3136 static const VMStateDescription vmstate_packed_virtqueue = {
3137 .name = "packed_virtqueue_state",
3138 .version_id = 1,
3139 .minimum_version_id = 1,
3140 .fields = (VMStateField[]) {
3141 VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
3142 VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
3143 VMSTATE_UINT16(used_idx, struct VirtQueue),
3144 VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
3145 VMSTATE_UINT32(inuse, struct VirtQueue),
3146 VMSTATE_END_OF_LIST()
3150 static const VMStateDescription vmstate_virtio_virtqueues = {
3151 .name = "virtio/virtqueues",
3152 .version_id = 1,
3153 .minimum_version_id = 1,
3154 .needed = &virtio_virtqueue_needed,
3155 .fields = (VMStateField[]) {
3156 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
3157 VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
3158 VMSTATE_END_OF_LIST()
3162 static const VMStateDescription vmstate_virtio_packed_virtqueues = {
3163 .name = "virtio/packed_virtqueues",
3164 .version_id = 1,
3165 .minimum_version_id = 1,
3166 .needed = &virtio_packed_virtqueue_needed,
3167 .fields = (VMStateField[]) {
3168 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
3169 VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
3170 VMSTATE_END_OF_LIST()
3174 static const VMStateDescription vmstate_ringsize = {
3175 .name = "ringsize_state",
3176 .version_id = 1,
3177 .minimum_version_id = 1,
3178 .fields = (VMStateField[]) {
3179 VMSTATE_UINT32(vring.num_default, struct VirtQueue),
3180 VMSTATE_END_OF_LIST()
3184 static const VMStateDescription vmstate_virtio_ringsize = {
3185 .name = "virtio/ringsize",
3186 .version_id = 1,
3187 .minimum_version_id = 1,
3188 .needed = &virtio_ringsize_needed,
3189 .fields = (VMStateField[]) {
3190 VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
3191 VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
3192 VMSTATE_END_OF_LIST()
3196 static int get_extra_state(QEMUFile *f, void *pv, size_t size,
3197 const VMStateField *field)
3199 VirtIODevice *vdev = pv;
3200 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3201 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3203 if (!k->load_extra_state) {
3204 return -1;
3205 } else {
3206 return k->load_extra_state(qbus->parent, f);
3210 static int put_extra_state(QEMUFile *f, void *pv, size_t size,
3211 const VMStateField *field, JSONWriter *vmdesc)
3213 VirtIODevice *vdev = pv;
3214 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3215 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3217 k->save_extra_state(qbus->parent, f);
3218 return 0;
3221 static const VMStateInfo vmstate_info_extra_state = {
3222 .name = "virtqueue_extra_state",
3223 .get = get_extra_state,
3224 .put = put_extra_state,
3227 static const VMStateDescription vmstate_virtio_extra_state = {
3228 .name = "virtio/extra_state",
3229 .version_id = 1,
3230 .minimum_version_id = 1,
3231 .needed = &virtio_extra_state_needed,
3232 .fields = (VMStateField[]) {
3234 .name = "extra_state",
3235 .version_id = 0,
3236 .field_exists = NULL,
3237 .size = 0,
3238 .info = &vmstate_info_extra_state,
3239 .flags = VMS_SINGLE,
3240 .offset = 0,
3242 VMSTATE_END_OF_LIST()
3246 static const VMStateDescription vmstate_virtio_device_endian = {
3247 .name = "virtio/device_endian",
3248 .version_id = 1,
3249 .minimum_version_id = 1,
3250 .needed = &virtio_device_endian_needed,
3251 .fields = (VMStateField[]) {
3252 VMSTATE_UINT8(device_endian, VirtIODevice),
3253 VMSTATE_END_OF_LIST()
3257 static const VMStateDescription vmstate_virtio_64bit_features = {
3258 .name = "virtio/64bit_features",
3259 .version_id = 1,
3260 .minimum_version_id = 1,
3261 .needed = &virtio_64bit_features_needed,
3262 .fields = (VMStateField[]) {
3263 VMSTATE_UINT64(guest_features, VirtIODevice),
3264 VMSTATE_END_OF_LIST()
3268 static const VMStateDescription vmstate_virtio_broken = {
3269 .name = "virtio/broken",
3270 .version_id = 1,
3271 .minimum_version_id = 1,
3272 .needed = &virtio_broken_needed,
3273 .fields = (VMStateField[]) {
3274 VMSTATE_BOOL(broken, VirtIODevice),
3275 VMSTATE_END_OF_LIST()
3279 static const VMStateDescription vmstate_virtio_started = {
3280 .name = "virtio/started",
3281 .version_id = 1,
3282 .minimum_version_id = 1,
3283 .needed = &virtio_started_needed,
3284 .fields = (VMStateField[]) {
3285 VMSTATE_BOOL(started, VirtIODevice),
3286 VMSTATE_END_OF_LIST()
3290 static const VMStateDescription vmstate_virtio_disabled = {
3291 .name = "virtio/disabled",
3292 .version_id = 1,
3293 .minimum_version_id = 1,
3294 .needed = &virtio_disabled_needed,
3295 .fields = (VMStateField[]) {
3296 VMSTATE_BOOL(disabled, VirtIODevice),
3297 VMSTATE_END_OF_LIST()
3301 static const VMStateDescription vmstate_virtio = {
3302 .name = "virtio",
3303 .version_id = 1,
3304 .minimum_version_id = 1,
3305 .fields = (VMStateField[]) {
3306 VMSTATE_END_OF_LIST()
3308 .subsections = (const VMStateDescription*[]) {
3309 &vmstate_virtio_device_endian,
3310 &vmstate_virtio_64bit_features,
3311 &vmstate_virtio_virtqueues,
3312 &vmstate_virtio_ringsize,
3313 &vmstate_virtio_broken,
3314 &vmstate_virtio_extra_state,
3315 &vmstate_virtio_started,
3316 &vmstate_virtio_packed_virtqueues,
3317 &vmstate_virtio_disabled,
3318 NULL
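/*
 * The save order below is fixed by the legacy wire format: transport
 * config, status/ISR/queue_sel, the low 32 feature bits, the device
 * config space, per-queue state, then device-specific state, and
 * finally the subsections declared above.
 */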
3322 int virtio_save(VirtIODevice *vdev, QEMUFile *f)
3324 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3325 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3326 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3327 uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
3328 int i;
3330 if (k->save_config) {
3331 k->save_config(qbus->parent, f);
3334 qemu_put_8s(f, &vdev->status);
3335 qemu_put_8s(f, &vdev->isr);
3336 qemu_put_be16s(f, &vdev->queue_sel);
3337 qemu_put_be32s(f, &guest_features_lo);
3338 qemu_put_be32(f, vdev->config_len);
3339 qemu_put_buffer(f, vdev->config, vdev->config_len);
3341 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3342 if (vdev->vq[i].vring.num == 0)
3343 break;
3346 qemu_put_be32(f, i);
3348 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3349 if (vdev->vq[i].vring.num == 0)
3350 break;
3352 qemu_put_be32(f, vdev->vq[i].vring.num);
3353 if (k->has_variable_vring_alignment) {
3354 qemu_put_be32(f, vdev->vq[i].vring.align);
3357 * Save desc now; the rest of the ring addresses are saved in
3358 * subsections for VIRTIO-1 devices.
3360 qemu_put_be64(f, vdev->vq[i].vring.desc);
3361 qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
3362 if (k->save_queue) {
3363 k->save_queue(qbus->parent, i, f);
3367 if (vdc->save != NULL) {
3368 vdc->save(vdev, f);
3371 if (vdc->vmsd) {
3372 int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
3373 if (ret) {
3374 return ret;
3378 /* Subsections */
3379 return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
3382 /* A wrapper for use as a VMState .put function */
3383 static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
3384 const VMStateField *field, JSONWriter *vmdesc)
3386 return virtio_save(VIRTIO_DEVICE(opaque), f);
3389 /* A wrapper for use as a VMState .get function */
3390 static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
3391 const VMStateField *field)
3393 VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
3394 DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
3396 return virtio_load(vdev, f, dc->vmsd->version_id);
3399 const VMStateInfo virtio_vmstate_info = {
3400 .name = "virtio",
3401 .get = virtio_device_get,
3402 .put = virtio_device_put,
3405 static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
3407 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
3408 bool bad = (val & ~(vdev->host_features)) != 0;
3410 val &= vdev->host_features;
3411 if (k->set_features) {
3412 k->set_features(vdev, val);
3414 vdev->guest_features = val;
3415 return bad ? -1 : 0;
3418 int virtio_set_features(VirtIODevice *vdev, uint64_t val)
3420 int ret;
3422 * The driver must not attempt to set features after feature negotiation
3423 * has finished.
3425 if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
3426 return -EINVAL;
3429 if (val & (1ull << VIRTIO_F_BAD_FEATURE)) {
3430 qemu_log_mask(LOG_GUEST_ERROR,
3431 "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n",
3432 __func__, vdev->name);
3435 ret = virtio_set_features_nocheck(vdev, val);
3436 if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
3437 /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
3438 int i;
3439 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3440 if (vdev->vq[i].vring.num != 0) {
3441 virtio_init_region_cache(vdev, i);
3445 if (!ret) {
3446 if (!virtio_device_started(vdev, vdev->status) &&
3447 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3448 vdev->start_on_kick = true;
3451 return ret;
3454 size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
3455 uint64_t host_features)
3457 size_t config_size = params->min_size;
3458 const VirtIOFeature *feature_sizes = params->feature_sizes;
3459 size_t i;
3461 for (i = 0; feature_sizes[i].flags != 0; i++) {
3462 if (host_features & feature_sizes[i].flags) {
3463 config_size = MAX(feature_sizes[i].end, config_size);
3467 assert(config_size <= params->max_size);
3468 return config_size;
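/*
 * Devices describe feature-dependent config fields with a VirtIOFeature
 * table; for example (names and fields merely illustrative):
 *
 *     static const VirtIOFeature feature_sizes[] = {
 *         {.flags = 1ULL << VIRTIO_NET_F_MQ,
 *          .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
 *         {}
 *     };
 *
 * The helper starts from params->min_size, grows to the largest .end
 * among the negotiated features, and asserts the result never exceeds
 * params->max_size.
 */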
3471 int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
3473 int i, ret;
3474 int32_t config_len;
3475 uint32_t num;
3476 uint32_t features;
3477 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3478 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3479 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3482 * We poison the endianness to ensure it does not get used before
3483 * subsections have been loaded.
3485 vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
3487 if (k->load_config) {
3488 ret = k->load_config(qbus->parent, f);
3489 if (ret)
3490 return ret;
3493 qemu_get_8s(f, &vdev->status);
3494 qemu_get_8s(f, &vdev->isr);
3495 qemu_get_be16s(f, &vdev->queue_sel);
3496 if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
3497 return -1;
3499 qemu_get_be32s(f, &features);
3502 * Temporarily set guest_features low bits - needed by
3503 * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
3504 * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
3506 * Note: devices should always test host features in future - don't create
3507 * new dependencies like this.
3509 vdev->guest_features = features;
3511 config_len = qemu_get_be32(f);
3514 * There are cases where the incoming config can be bigger or smaller
3515 * than what we have; so load what we have space for, and skip
3516 * any excess that's in the stream.
3518 qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3520 while (config_len > vdev->config_len) {
3521 qemu_get_byte(f);
3522 config_len--;
3525 num = qemu_get_be32(f);
3527 if (num > VIRTIO_QUEUE_MAX) {
3528 error_report("Invalid number of virtqueues: 0x%x", num);
3529 return -1;
3532 for (i = 0; i < num; i++) {
3533 vdev->vq[i].vring.num = qemu_get_be32(f);
3534 if (k->has_variable_vring_alignment) {
3535 vdev->vq[i].vring.align = qemu_get_be32(f);
3537 vdev->vq[i].vring.desc = qemu_get_be64(f);
3538 qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
3539 vdev->vq[i].signalled_used_valid = false;
3540 vdev->vq[i].notification = true;
3542 if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
3543 error_report("VQ %d address 0x0 "
3544 "inconsistent with Host index 0x%x",
3545 i, vdev->vq[i].last_avail_idx);
3546 return -1;
3548 if (k->load_queue) {
3549 ret = k->load_queue(qbus->parent, i, f);
3550 if (ret)
3551 return ret;
3555 virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
3557 if (vdc->load != NULL) {
3558 ret = vdc->load(vdev, f, version_id);
3559 if (ret) {
3560 return ret;
3564 if (vdc->vmsd) {
3565 ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3566 if (ret) {
3567 return ret;
3571 /* Subsections */
3572 ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
3573 if (ret) {
3574 return ret;
3577 if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3578 vdev->device_endian = virtio_default_endian();
3581 if (virtio_64bit_features_needed(vdev)) {
3583 * Subsection load filled vdev->guest_features. Run them
3584 * through virtio_set_features to sanity-check them against
3585 * host_features.
3587 uint64_t features64 = vdev->guest_features;
3588 if (virtio_set_features_nocheck(vdev, features64) < 0) {
3589 error_report("Features 0x%" PRIx64 " unsupported. "
3590 "Allowed features: 0x%" PRIx64,
3591 features64, vdev->host_features);
3592 return -1;
3594 } else {
3595 if (virtio_set_features_nocheck(vdev, features) < 0) {
3596 error_report("Features 0x%x unsupported. "
3597 "Allowed features: 0x%" PRIx64,
3598 features, vdev->host_features);
3599 return -1;
3603 if (!virtio_device_started(vdev, vdev->status) &&
3604 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3605 vdev->start_on_kick = true;
3608 RCU_READ_LOCK_GUARD();
3609 for (i = 0; i < num; i++) {
3610 if (vdev->vq[i].vring.desc) {
3611 uint16_t nheads;
3614 * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3615 * only the region cache needs to be set up. Legacy devices need
3616 * to calculate used and avail ring addresses based on the desc
3617 * address.
3619 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3620 virtio_init_region_cache(vdev, i);
3621 } else {
3622 virtio_queue_update_rings(vdev, i);
3625 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3626 vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3627 vdev->vq[i].shadow_avail_wrap_counter =
3628 vdev->vq[i].last_avail_wrap_counter;
3629 continue;
3632 nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
3633 /* Check it isn't doing strange things with descriptor numbers. */
3634 if (nheads > vdev->vq[i].vring.num) {
3635 virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
3636 "inconsistent with Host index 0x%x: delta 0x%x",
3637 i, vdev->vq[i].vring.num,
3638 vring_avail_idx(&vdev->vq[i]),
3639 vdev->vq[i].last_avail_idx, nheads);
3640 vdev->vq[i].used_idx = 0;
3641 vdev->vq[i].shadow_avail_idx = 0;
3642 vdev->vq[i].inuse = 0;
3643 continue;
3645 vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
3646 vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
3649 * Some devices migrate VirtQueueElements that have been popped
3650 * from the avail ring but not yet returned to the used ring.
3651 * Since max ring size < UINT16_MAX it's safe to use modulo
3652 * UINT16_MAX + 1 subtraction.
3654 vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3655 vdev->vq[i].used_idx);
3656 if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3657 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3658 "used_idx 0x%x",
3659 i, vdev->vq[i].vring.num,
3660 vdev->vq[i].last_avail_idx,
3661 vdev->vq[i].used_idx);
3662 return -1;
3667 if (vdc->post_load) {
3668 ret = vdc->post_load(vdev);
3669 if (ret) {
3670 return ret;
3674 return 0;
3677 void virtio_cleanup(VirtIODevice *vdev)
3679 qemu_del_vm_change_state_handler(vdev->vmstate);
3682 static void virtio_vmstate_change(void *opaque, bool running, RunState state)
3684 VirtIODevice *vdev = opaque;
3685 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3686 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3687 bool backend_run = running && virtio_device_started(vdev, vdev->status);
3688 vdev->vm_running = running;
3690 if (backend_run) {
3691 virtio_set_status(vdev, vdev->status);
3694 if (k->vmstate_change) {
3695 k->vmstate_change(qbus->parent, backend_run);
3698 if (!backend_run) {
3699 virtio_set_status(vdev, vdev->status);
3703 void virtio_instance_init_common(Object *proxy_obj, void *data,
3704 size_t vdev_size, const char *vdev_name)
3706 DeviceState *vdev = data;
3708 object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3709 vdev_size, vdev_name, &error_abort,
3710 NULL);
3711 qdev_alias_all_properties(vdev, proxy_obj);
3714 void virtio_init(VirtIODevice *vdev, uint16_t device_id, size_t config_size)
3716 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3717 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3718 int i;
3719 int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3721 if (nvectors) {
3722 vdev->vector_queues =
3723 g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3726 vdev->start_on_kick = false;
3727 vdev->started = false;
3728 vdev->vhost_started = false;
3729 vdev->device_id = device_id;
3730 vdev->status = 0;
3731 qatomic_set(&vdev->isr, 0);
3732 vdev->queue_sel = 0;
3733 vdev->config_vector = VIRTIO_NO_VECTOR;
3734 vdev->vq = g_new0(VirtQueue, VIRTIO_QUEUE_MAX);
3735 vdev->vm_running = runstate_is_running();
3736 vdev->broken = false;
3737 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3738 vdev->vq[i].vector = VIRTIO_NO_VECTOR;
3739 vdev->vq[i].vdev = vdev;
3740 vdev->vq[i].queue_index = i;
3741 vdev->vq[i].host_notifier_enabled = false;
3744 vdev->name = virtio_id_to_name(device_id);
3745 vdev->config_len = config_size;
3746 if (vdev->config_len) {
3747 vdev->config = g_malloc0(config_size);
3748 } else {
3749 vdev->config = NULL;
3751 vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3752 virtio_vmstate_change, vdev);
3753 vdev->device_endian = virtio_default_endian();
3754 vdev->use_guest_notifier_mask = true;
3758 * Only devices that existed before the virtio standard was defined
3759 * support legacy mode; this includes devices not specified in the
3760 * standard. All newer devices conform to the virtio standard only.
3762 bool virtio_legacy_allowed(VirtIODevice *vdev)
3764 switch (vdev->device_id) {
3765 case VIRTIO_ID_NET:
3766 case VIRTIO_ID_BLOCK:
3767 case VIRTIO_ID_CONSOLE:
3768 case VIRTIO_ID_RNG:
3769 case VIRTIO_ID_BALLOON:
3770 case VIRTIO_ID_RPMSG:
3771 case VIRTIO_ID_SCSI:
3772 case VIRTIO_ID_9P:
3773 case VIRTIO_ID_RPROC_SERIAL:
3774 case VIRTIO_ID_CAIF:
3775 return true;
3776 default:
3777 return false;
3781 bool virtio_legacy_check_disabled(VirtIODevice *vdev)
3783 return vdev->disable_legacy_check;
3786 hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
3788 return vdev->vq[n].vring.desc;
3791 bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
3793 return virtio_queue_get_desc_addr(vdev, n) != 0;
3796 bool virtio_queue_enabled(VirtIODevice *vdev, int n)
3798 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3799 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3801 if (k->queue_enabled) {
3802 return k->queue_enabled(qbus->parent, n);
3804 return virtio_queue_enabled_legacy(vdev, n);
3807 hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
3809 return vdev->vq[n].vring.avail;
3812 hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
3814 return vdev->vq[n].vring.used;
3817 hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
3819 return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3822 hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
3824 int s;
3826 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3827 return sizeof(struct VRingPackedDescEvent);
3830 s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3831 return offsetof(VRingAvail, ring) +
3832 sizeof(uint16_t) * vdev->vq[n].vring.num + s;
3835 hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
3837 int s;
3839 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3840 return sizeof(struct VRingPackedDescEvent);
3843 s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3844 return offsetof(VRingUsed, ring) +
3845 sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
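/*
 * For packed queues the "last avail" value exchanged with transports
 * packs two ring positions and their wrap counters into 32 bits:
 *
 *     bits  0-14: last_avail_idx    bit 15: last_avail wrap counter
 *     bits 16-30: used_idx          bit 31: used wrap counter
 */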
3848 static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
3849 int n)
3851 unsigned int avail, used;
3853 avail = vdev->vq[n].last_avail_idx;
3854 avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3856 used = vdev->vq[n].used_idx;
3857 used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3859 return avail | used << 16;
3862 static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
3863 int n)
3865 return vdev->vq[n].last_avail_idx;
3868 unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
3870 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3871 return virtio_queue_packed_get_last_avail_idx(vdev, n);
3872 } else {
3873 return virtio_queue_split_get_last_avail_idx(vdev, n);
3877 static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
3878 int n, unsigned int idx)
3880 struct VirtQueue *vq = &vdev->vq[n];
3882 vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3883 vq->last_avail_wrap_counter =
3884 vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3885 idx >>= 16;
3886 vq->used_idx = idx & 0x7fff;
3887 vq->used_wrap_counter = !!(idx & 0x8000);
3890 static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
3891 int n, unsigned int idx)
3893 vdev->vq[n].last_avail_idx = idx;
3894 vdev->vq[n].shadow_avail_idx = idx;
3897 void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
3898 unsigned int idx)
3900 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3901 virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
3902 } else {
3903 virtio_queue_split_set_last_avail_idx(vdev, n, idx);
3907 static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
3908 int n)
3910 /* We don't have a reference like avail idx in shared memory */
3911 return;
3914 static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
3915 int n)
3917 RCU_READ_LOCK_GUARD();
3918 if (vdev->vq[n].vring.desc) {
3919 vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3920 vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3924 void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
3926 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3927 virtio_queue_packed_restore_last_avail_idx(vdev, n);
3928 } else {
3929 virtio_queue_split_restore_last_avail_idx(vdev, n);
3933 static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
3935 /* used idx was updated through set_last_avail_idx() */
3936 return;
3939 static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
3941 RCU_READ_LOCK_GUARD();
3942 if (vdev->vq[n].vring.desc) {
3943 vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3947 void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
3949 if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3950 return virtio_queue_packed_update_used_idx(vdev, n);
3951 } else {
3952 return virtio_split_packed_update_used_idx(vdev, n);
3956 void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
3958 vdev->vq[n].signalled_used_valid = false;
3961 VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
3963 return vdev->vq + n;
3966 uint16_t virtio_get_queue_index(VirtQueue *vq)
3968 return vq->queue_index;
3971 static void virtio_queue_guest_notifier_read(EventNotifier *n)
3973 VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
3974 if (event_notifier_test_and_clear(n)) {
3975 virtio_irq(vq);
3979 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
3980 bool with_irqfd)
3982 if (assign && !with_irqfd) {
3983 event_notifier_set_handler(&vq->guest_notifier,
3984 virtio_queue_guest_notifier_read);
3985 } else {
3986 event_notifier_set_handler(&vq->guest_notifier, NULL);
3988 if (!assign) {
3989 /* Test and clear notifier before closing it,
3990 * in case poll callback didn't have time to run. */
3991 virtio_queue_guest_notifier_read(&vq->guest_notifier);
3995 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
3997 return &vq->guest_notifier;
4000 static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
4002 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
4004 virtio_queue_set_notification(vq, 0);
4007 static bool virtio_queue_host_notifier_aio_poll(void *opaque)
4009 EventNotifier *n = opaque;
4010 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
4012 return vq->vring.desc && !virtio_queue_empty(vq);
4015 static void virtio_queue_host_notifier_aio_poll_ready(EventNotifier *n)
4017 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
4019 virtio_queue_notify_vq(vq);
4022 static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
4024 VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
4026 /* Caller polls once more after this to catch requests that race with us */
4027 virtio_queue_set_notification(vq, 1);
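/*
 * The poll_begin/poll_end hooks above toggle guest notifications around
 * an adaptive polling burst: while the AioContext busy-polls the ring
 * there is no point paying for a kick (and the vmexit behind it), and
 * notifications are re-enabled before blocking on the event notifier
 * again.
 */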
void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
{
    aio_set_event_notifier(ctx, &vq->host_notifier, true,
                           virtio_queue_host_notifier_read,
                           virtio_queue_host_notifier_aio_poll,
                           virtio_queue_host_notifier_aio_poll_ready);
    aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                virtio_queue_host_notifier_aio_poll_begin,
                                virtio_queue_host_notifier_aio_poll_end);
}

/*
 * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
 * this for rx virtqueues and similar cases where the virtqueue handler
 * function does not pop all elements. When the virtqueue is left non-empty
 * polling consumes CPU cycles and should not be used.
 */
void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
{
    aio_set_event_notifier(ctx, &vq->host_notifier, true,
                           virtio_queue_host_notifier_read,
                           NULL, NULL);
}

void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
{
    aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);

    /* Test and clear notifier after disabling event,
     * in case poll callback didn't have time to run. */
    virtio_queue_host_notifier_read(&vq->host_notifier);
}

void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
{
    vq->host_notifier_enabled = enabled;
}

int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
                                      MemoryRegion *mr, bool assign)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->set_host_notifier_mr) {
        return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
    }

    return -1;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

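/*
 * Mark the device broken: report the error, set NEEDS_RESET for
 * VIRTIO 1.0 devices (legacy devices have no way to signal this) and
 * notify the guest so it can recover by resetting the device.
 */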
void G_GNUC_PRINTF(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        vdev->status = vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET;
        virtio_notify_config(vdev);
    }

    vdev->broken = true;
}

static void virtio_memory_listener_commit(MemoryListener *listener)
{
    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_init_region_cache(vdev, i);
    }
}

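/*
 * Realize path: run the subclass realize hook first, then plug the
 * device into its virtio bus.  On bus-plug failure the subclass is
 * unrealized again so no half-initialized device is left behind.
 */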
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        vdc->unrealize(dev);
        return;
    }

    vdev->listener.commit = virtio_memory_listener_commit;
    vdev->listener.name = "virtio";
    memory_listener_register(&vdev->listener, vdev->dma_as);
    QTAILQ_INSERT_TAIL(&virtio_list, vdev, next);
}

static void virtio_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);

    memory_listener_unregister(&vdev->listener);
    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev);
    }

    QTAILQ_REMOVE(&virtio_list, vdev, next);
    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static void virtio_device_free_virtqueues(VirtIODevice *vdev)
{
    int i;
    if (!vdev->vq) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
    }
    g_free(vdev->vq);
}

static void virtio_device_instance_finalize(Object *obj)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(obj);

    virtio_device_free_virtqueues(vdev);

    g_free(vdev->config);
    g_free(vdev->vector_queues);
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
    DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
    DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
                     disable_legacy_check, false),
    DEFINE_PROP_END_OF_LIST(),
};

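/*
 * Wire up ioeventfd for every in-use virtqueue.  On failure the
 * notifiers assigned so far are torn down in two passes: handlers and
 * bus notifiers are removed inside the memory transaction, while the
 * eventfds themselves are only closed after the transaction commits.
 */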
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, n, r, err;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    memory_region_transaction_commit();
    return 0;

assign_error:
    i = n; /* save n for a second iteration after transaction is committed. */
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    while (--i >= 0) {
        if (!virtio_queue_get_num(vdev, i)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, i);
    }
    return err;
}

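/*
 * Public entry points delegate to the transport (the virtio bus), which
 * decides whether ioeventfd can be used before calling back into the
 * _impl hooks above.
 */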
int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}

static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();
    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        virtio_bus_cleanup_host_notifier(qbus, n);
    }
}

int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    device_class_set_props(dc, virtio_properties);
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;

    QTAILQ_INIT(&virtio_list);
}

bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}

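/*
 * Implements the x-query-virtio QMP command: walk the list of realized
 * virtio devices and return their canonical QOM paths and names.
 * Illustrative invocation:
 *
 *     { "execute": "x-query-virtio" }
 */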
VirtioInfoList *qmp_x_query_virtio(Error **errp)
{
    VirtioInfoList *list = NULL;
    VirtioInfoList *node;
    VirtIODevice *vdev;

    QTAILQ_FOREACH(vdev, &virtio_list, next) {
        DeviceState *dev = DEVICE(vdev);
        Error *err = NULL;
        QObject *obj = qmp_qom_get(dev->canonical_path, "realized", &err);

        if (err == NULL) {
            GString *is_realized = qobject_to_json_pretty(obj, true);
            /* virtio device is NOT realized, remove it from list */
            if (!strncmp(is_realized->str, "false", 5)) {
                QTAILQ_REMOVE(&virtio_list, vdev, next);
            } else {
                node = g_new0(VirtioInfoList, 1);
                node->value = g_new(VirtioInfo, 1);
                node->value->path = g_strdup(dev->canonical_path);
                node->value->name = g_strdup(vdev->name);
                node->next = list;
                list = node;
            }
            g_string_free(is_realized, true);
        }
        qobject_unref(obj);
    }

    return list;
}

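/*
 * Look up a realized VirtIODevice by its canonical QOM path.  Devices
 * that turn out to be unrealized, or that are no longer present in the
 * QOM tree, are dropped from virtio_list along the way.
 */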
static VirtIODevice *virtio_device_find(const char *path)
{
    VirtIODevice *vdev;

    QTAILQ_FOREACH(vdev, &virtio_list, next) {
        DeviceState *dev = DEVICE(vdev);

        if (strcmp(dev->canonical_path, path) != 0) {
            continue;
        }

        Error *err = NULL;
        QObject *obj = qmp_qom_get(dev->canonical_path, "realized", &err);
        if (err == NULL) {
            GString *is_realized = qobject_to_json_pretty(obj, true);
            /* virtio device is NOT realized, remove it from list */
            if (!strncmp(is_realized->str, "false", 5)) {
                g_string_free(is_realized, true);
                qobject_unref(obj);
                QTAILQ_REMOVE(&virtio_list, vdev, next);
                return NULL;
            }
            g_string_free(is_realized, true);
        } else {
            /* virtio device doesn't exist in QOM tree */
            QTAILQ_REMOVE(&virtio_list, vdev, next);
            qobject_unref(obj);
            return NULL;
        }
        /* device exists in QOM tree & is realized */
        qobject_unref(obj);
        return vdev;
    }

    return NULL;
}

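/*
 * GCC statement-expression that turns a feature/status bitmap into a
 * QAPI string list using one of the feature maps declared above.
 * Matched bits are cleared from @bitmap, so whatever remains afterwards
 * is reported by the callers as "unknown" bits.  Relies on `i' and
 * `bit' being declared in the calling scope.
 */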
#define CONVERT_FEATURES(type, map, is_status, bitmap)   \
    ({                                                   \
        type *list = NULL;                               \
        type *node;                                      \
        for (i = 0; map[i].virtio_bit != -1; i++) {      \
            if (is_status) {                             \
                bit = map[i].virtio_bit;                 \
            } else {                                     \
                bit = 1ULL << map[i].virtio_bit;         \
            }                                            \
            if ((bitmap & bit) == 0) {                   \
                continue;                                \
            }                                            \
            node = g_new0(type, 1);                      \
            node->value = g_strdup(map[i].feature_desc); \
            node->next = list;                           \
            list = node;                                 \
            bitmap ^= bit;                               \
        }                                                \
        list;                                            \
    })

static VirtioDeviceStatus *qmp_decode_status(uint8_t bitmap)
{
    VirtioDeviceStatus *status;
    uint8_t bit;
    int i;

    status = g_new0(VirtioDeviceStatus, 1);
    status->statuses = CONVERT_FEATURES(strList, virtio_config_status_map,
                                        1, bitmap);
    status->has_unknown_statuses = bitmap != 0;
    if (status->has_unknown_statuses) {
        status->unknown_statuses = bitmap;
    }

    return status;
}

static VhostDeviceProtocols *qmp_decode_protocols(uint64_t bitmap)
{
    VhostDeviceProtocols *vhu_protocols;
    uint64_t bit;
    int i;

    vhu_protocols = g_new0(VhostDeviceProtocols, 1);
    vhu_protocols->protocols =
        CONVERT_FEATURES(strList,
                         vhost_user_protocol_map, 0, bitmap);
    vhu_protocols->has_unknown_protocols = bitmap != 0;
    if (vhu_protocols->has_unknown_protocols) {
        vhu_protocols->unknown_protocols = bitmap;
    }

    return vhu_protocols;
}

static VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id,
                                                 uint64_t bitmap)
{
    VirtioDeviceFeatures *features;
    uint64_t bit;
    int i;

    features = g_new0(VirtioDeviceFeatures, 1);
    features->has_dev_features = true;

    /* transport features */
    features->transports = CONVERT_FEATURES(strList, virtio_transport_map, 0,
                                            bitmap);

    /* device features */
    switch (device_id) {
#ifdef CONFIG_VIRTIO_SERIAL
    case VIRTIO_ID_CONSOLE:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_serial_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_BLK
    case VIRTIO_ID_BLOCK:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_blk_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_GPU
    case VIRTIO_ID_GPU:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_gpu_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_NET
    case VIRTIO_ID_NET:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_net_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_SCSI
    case VIRTIO_ID_SCSI:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_scsi_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_BALLOON
    case VIRTIO_ID_BALLOON:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_balloon_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_IOMMU
    case VIRTIO_ID_IOMMU:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_iommu_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_INPUT
    case VIRTIO_ID_INPUT:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_input_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VHOST_USER_FS
    case VIRTIO_ID_FS:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_fs_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VHOST_VSOCK
    case VIRTIO_ID_VSOCK:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_vsock_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_CRYPTO
    case VIRTIO_ID_CRYPTO:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_crypto_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_MEM
    case VIRTIO_ID_MEM:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_mem_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_I2C_ADAPTER
    case VIRTIO_ID_I2C_ADAPTER:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_i2c_feature_map, 0, bitmap);
        break;
#endif
#ifdef CONFIG_VIRTIO_RNG
    case VIRTIO_ID_RNG:
        features->dev_features =
            CONVERT_FEATURES(strList, virtio_rng_feature_map, 0, bitmap);
        break;
#endif
    /* No features */
    case VIRTIO_ID_9P:
    case VIRTIO_ID_PMEM:
    case VIRTIO_ID_IOMEM:
    case VIRTIO_ID_RPMSG:
    case VIRTIO_ID_CLOCK:
    case VIRTIO_ID_MAC80211_WLAN:
    case VIRTIO_ID_MAC80211_HWSIM:
    case VIRTIO_ID_RPROC_SERIAL:
    case VIRTIO_ID_MEMORY_BALLOON:
    case VIRTIO_ID_CAIF:
    case VIRTIO_ID_SIGNAL_DIST:
    case VIRTIO_ID_PSTORE:
    case VIRTIO_ID_SOUND:
    case VIRTIO_ID_BT:
    case VIRTIO_ID_RPMB:
    case VIRTIO_ID_VIDEO_ENCODER:
    case VIRTIO_ID_VIDEO_DECODER:
    case VIRTIO_ID_SCMI:
    case VIRTIO_ID_NITRO_SEC_MOD:
    case VIRTIO_ID_WATCHDOG:
    case VIRTIO_ID_CAN:
    case VIRTIO_ID_DMABUF:
    case VIRTIO_ID_PARAM_SERV:
    case VIRTIO_ID_AUDIO_POLICY:
    case VIRTIO_ID_GPIO:
        break;
    default:
        g_assert_not_reached();
    }

    features->has_unknown_dev_features = bitmap != 0;
    if (features->has_unknown_dev_features) {
        features->unknown_dev_features = bitmap;
    }

    return features;
}

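/*
 * Implements the x-query-virtio-status QMP command.  Illustrative
 * invocation (the path shown is an example, not a fixed value):
 *
 *     { "execute": "x-query-virtio-status",
 *       "arguments": {
 *           "path": "/machine/peripheral-anon/device[1]/virtio-backend" } }
 */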
VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp)
{
    VirtIODevice *vdev;
    VirtioStatus *status;

    vdev = virtio_device_find(path);
    if (vdev == NULL) {
        error_setg(errp, "Path %s is not a VirtIODevice", path);
        return NULL;
    }

    status = g_new0(VirtioStatus, 1);
    status->name = g_strdup(vdev->name);
    status->device_id = vdev->device_id;
    status->vhost_started = vdev->vhost_started;
    status->guest_features = qmp_decode_features(vdev->device_id,
                                                 vdev->guest_features);
    status->host_features = qmp_decode_features(vdev->device_id,
                                                vdev->host_features);
    status->backend_features = qmp_decode_features(vdev->device_id,
                                                   vdev->backend_features);

    switch (vdev->device_endian) {
    case VIRTIO_DEVICE_ENDIAN_LITTLE:
        status->device_endian = g_strdup("little");
        break;
    case VIRTIO_DEVICE_ENDIAN_BIG:
        status->device_endian = g_strdup("big");
        break;
    default:
        status->device_endian = g_strdup("unknown");
        break;
    }

    status->num_vqs = virtio_get_num_queues(vdev);
    status->status = qmp_decode_status(vdev->status);
    status->isr = vdev->isr;
    status->queue_sel = vdev->queue_sel;
    status->vm_running = vdev->vm_running;
    status->broken = vdev->broken;
    status->disabled = vdev->disabled;
    status->use_started = vdev->use_started;
    status->started = vdev->started;
    status->start_on_kick = vdev->start_on_kick;
    status->disable_legacy_check = vdev->disable_legacy_check;
    status->bus_name = g_strdup(vdev->bus_name);
    status->use_guest_notifier_mask = vdev->use_guest_notifier_mask;
    status->has_vhost_dev = vdev->vhost_started;

    if (vdev->vhost_started) {
        VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
        struct vhost_dev *hdev = vdc->get_vhost(vdev);

        status->vhost_dev = g_new0(VhostStatus, 1);
        status->vhost_dev->n_mem_sections = hdev->n_mem_sections;
        status->vhost_dev->n_tmp_sections = hdev->n_tmp_sections;
        status->vhost_dev->nvqs = hdev->nvqs;
        status->vhost_dev->vq_index = hdev->vq_index;
        status->vhost_dev->features =
            qmp_decode_features(vdev->device_id, hdev->features);
        status->vhost_dev->acked_features =
            qmp_decode_features(vdev->device_id, hdev->acked_features);
        status->vhost_dev->backend_features =
            qmp_decode_features(vdev->device_id, hdev->backend_features);
        status->vhost_dev->protocol_features =
            qmp_decode_protocols(hdev->protocol_features);
        status->vhost_dev->max_queues = hdev->max_queues;
        status->vhost_dev->backend_cap = hdev->backend_cap;
        status->vhost_dev->log_enabled = hdev->log_enabled;
        status->vhost_dev->log_size = hdev->log_size;
    }

    return status;
}

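/*
 * Implements x-query-virtio-vhost-queue-status: expose the vhost
 * backend's view of one virtqueue.  Only valid while vhost is running,
 * and @queue is an absolute index in [vq_index, vq_index + nvqs).
 */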
VirtVhostQueueStatus *qmp_x_query_virtio_vhost_queue_status(const char *path,
                                                            uint16_t queue,
                                                            Error **errp)
{
    VirtIODevice *vdev;
    VirtVhostQueueStatus *status;

    vdev = virtio_device_find(path);
    if (vdev == NULL) {
        error_setg(errp, "Path %s is not a VirtIODevice", path);
        return NULL;
    }

    if (!vdev->vhost_started) {
        error_setg(errp, "vhost device has not started yet");
        return NULL;
    }

    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    struct vhost_dev *hdev = vdc->get_vhost(vdev);

    if (queue < hdev->vq_index || queue >= hdev->vq_index + hdev->nvqs) {
        error_setg(errp, "Invalid vhost virtqueue number %d", queue);
        return NULL;
    }

    status = g_new0(VirtVhostQueueStatus, 1);
    status->name = g_strdup(vdev->name);
    status->kick = hdev->vqs[queue].kick;
    status->call = hdev->vqs[queue].call;
    status->desc = (uintptr_t)hdev->vqs[queue].desc;
    status->avail = (uintptr_t)hdev->vqs[queue].avail;
    status->used = (uintptr_t)hdev->vqs[queue].used;
    status->num = hdev->vqs[queue].num;
    status->desc_phys = hdev->vqs[queue].desc_phys;
    status->desc_size = hdev->vqs[queue].desc_size;
    status->avail_phys = hdev->vqs[queue].avail_phys;
    status->avail_size = hdev->vqs[queue].avail_size;
    status->used_phys = hdev->vqs[queue].used_phys;
    status->used_size = hdev->vqs[queue].used_size;

    return status;
}

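/*
 * Implements x-query-virtio-queue-status: QEMU's own bookkeeping for a
 * virtqueue.  When vhost is active, last_avail_idx is obtained from the
 * backend instead, and shadow_avail_idx is unavailable.
 */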
VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path,
                                                 uint16_t queue,
                                                 Error **errp)
{
    VirtIODevice *vdev;
    VirtQueueStatus *status;

    vdev = virtio_device_find(path);
    if (vdev == NULL) {
        error_setg(errp, "Path %s is not a VirtIODevice", path);
        return NULL;
    }

    if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
        error_setg(errp, "Invalid virtqueue number %d", queue);
        return NULL;
    }

    status = g_new0(VirtQueueStatus, 1);
    status->name = g_strdup(vdev->name);
    status->queue_index = vdev->vq[queue].queue_index;
    status->inuse = vdev->vq[queue].inuse;
    status->vring_num = vdev->vq[queue].vring.num;
    status->vring_num_default = vdev->vq[queue].vring.num_default;
    status->vring_align = vdev->vq[queue].vring.align;
    status->vring_desc = vdev->vq[queue].vring.desc;
    status->vring_avail = vdev->vq[queue].vring.avail;
    status->vring_used = vdev->vq[queue].vring.used;
    status->used_idx = vdev->vq[queue].used_idx;
    status->signalled_used = vdev->vq[queue].signalled_used;
    status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;

    if (vdev->vhost_started) {
        VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
        struct vhost_dev *hdev = vdc->get_vhost(vdev);

        /* check if vq index exists for vhost as well */
        if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
            status->has_last_avail_idx = true;

            int vhost_vq_index =
                hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
            struct vhost_vring_state state = {
                .index = vhost_vq_index,
            };

            status->last_avail_idx =
                hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
        }
    } else {
        status->has_shadow_avail_idx = true;
        status->has_last_avail_idx = true;
        status->last_avail_idx = vdev->vq[queue].last_avail_idx;
        status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
    }

    return status;
}

static strList *qmp_decode_vring_desc_flags(uint16_t flags)
{
    strList *list = NULL;
    strList *node;
    int i;

    struct {
        uint16_t flag;
        const char *value;
    } map[] = {
        { VRING_DESC_F_NEXT, "next" },
        { VRING_DESC_F_WRITE, "write" },
        { VRING_DESC_F_INDIRECT, "indirect" },
        { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" },
        { 1 << VRING_PACKED_DESC_F_USED, "used" },
        { 0, "" }
    };

    for (i = 0; map[i].flag; i++) {
        if ((map[i].flag & flags) == 0) {
            continue;
        }
        node = g_malloc0(sizeof(strList));
        node->value = g_strdup(map[i].value);
        node->next = list;
        list = node;
    }

    return list;
}

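/*
 * Implements x-query-virtio-queue-element: decode one element of a
 * split virtqueue (packed rings are not supported) without popping it.
 * Without an explicit index, the element at last_avail_idx is shown.
 */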
VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path,
                                                     uint16_t queue,
                                                     bool has_index,
                                                     uint16_t index,
                                                     Error **errp)
{
    VirtIODevice *vdev;
    VirtQueue *vq;
    VirtioQueueElement *element = NULL;

    vdev = virtio_device_find(path);
    if (vdev == NULL) {
        error_setg(errp, "Path %s is not a VirtIO device", path);
        return NULL;
    }

    if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
        error_setg(errp, "Invalid virtqueue number %d", queue);
        return NULL;
    }
    vq = &vdev->vq[queue];

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
        error_setg(errp, "Packed ring not supported");
        return NULL;
    } else {
        unsigned int head, i, max;
        VRingMemoryRegionCaches *caches;
        MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
        MemoryRegionCache *desc_cache;
        VRingDesc desc;
        VirtioRingDescList *list = NULL;
        VirtioRingDescList *node;
        int rc, ndescs;

        RCU_READ_LOCK_GUARD();

        max = vq->vring.num;

        if (!has_index) {
            head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
        } else {
            head = vring_avail_ring(vq, index % vq->vring.num);
        }
        i = head;

        caches = vring_get_region_caches(vq);
        if (!caches) {
            error_setg(errp, "Region caches not initialized");
            return NULL;
        }
        if (caches->desc.len < max * sizeof(VRingDesc)) {
            error_setg(errp, "Cannot map descriptor ring");
            return NULL;
        }

        desc_cache = &caches->desc;
        vring_split_desc_read(vdev, &desc, desc_cache, i);
        if (desc.flags & VRING_DESC_F_INDIRECT) {
            int64_t len;
            len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
                                           desc.addr, desc.len, false);
            desc_cache = &indirect_desc_cache;
            if (len < desc.len) {
                error_setg(errp, "Cannot map indirect buffer");
                goto done;
            }

            max = desc.len / sizeof(VRingDesc);
            i = 0;
            vring_split_desc_read(vdev, &desc, desc_cache, i);
        }

        element = g_new0(VirtioQueueElement, 1);
        element->avail = g_new0(VirtioRingAvail, 1);
        element->used = g_new0(VirtioRingUsed, 1);
        element->name = g_strdup(vdev->name);
        element->index = head;
        element->avail->flags = vring_avail_flags(vq);
        element->avail->idx = vring_avail_idx(vq);
        element->avail->ring = head;
        element->used->flags = vring_used_flags(vq);
        element->used->idx = vring_used_idx(vq);
        ndescs = 0;

        do {
            /* A buggy driver may produce an infinite loop */
            if (ndescs >= max) {
                break;
            }
            node = g_new0(VirtioRingDescList, 1);
            node->value = g_new0(VirtioRingDesc, 1);
            node->value->addr = desc.addr;
            node->value->len = desc.len;
            node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
            node->next = list;
            list = node;

            ndescs++;
            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache,
                                                max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);
        element->descs = list;
done:
        address_space_cache_destroy(&indirect_desc_cache);
    }

    return element;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .instance_finalize = virtio_device_instance_finalize,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)