4 #include "hw/virtio/vhost-backend.h"
5 #include "hw/virtio/virtio.h"
6 #include "exec/memory.h"
/* Generic structures common for any vhost based device. */

/*
 * Region used to track in-flight descriptors, shared with the vhost
 * backend (e.g. so requests can be resubmitted after a reconnect).
 *
 * NOTE(review): the member list of this struct was lost when this
 * header chunk was extracted; the fields below are restored from the
 * upstream QEMU include/hw/virtio/vhost.h -- verify against the exact
 * tree/version this header belongs to.
 */
struct vhost_inflight {
    int fd;              /* fd backing the shared memory region */
    void *addr;          /* mapping of the region in our address space */
    uint64_t size;       /* size of the mapped region in bytes */
    uint64_t offset;     /* offset of the region within the fd */
    uint16_t queue_size;
};
18 struct vhost_virtqueue
{
25 unsigned long long desc_phys
;
27 unsigned long long avail_phys
;
29 unsigned long long used_phys
;
31 EventNotifier masked_notifier
;
32 struct vhost_dev
*dev
;
/* One chunk of the dirty-memory log; each bit marks one dirty page. */
typedef unsigned long vhost_log_chunk_t;

#define VHOST_LOG_PAGE 0x1000                            /* bytes covered per log bit */
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))   /* bits per chunk */
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS) /* bytes covered per chunk */
#define VHOST_INVALID_FEATURE_BIT (0xff)                 /* sentinel: feature list end */
42 unsigned long long size
;
45 vhost_log_chunk_t
*log
;
50 struct vhost_dev
*hdev
;
54 QLIST_ENTRY(vhost_iommu
) iommu_next
;
/*
 * Callbacks a device can register to be told about config space
 * changes (see vhost_dev_set_config_notifier()).
 */
typedef struct VhostDevConfigOps {
    /* Vhost device config space changed callback */
    int (*vhost_dev_config_notifier)(struct vhost_dev *dev);
} VhostDevConfigOps;
66 MemoryListener memory_listener
;
67 MemoryListener iommu_listener
;
68 struct vhost_memory
*mem
;
70 MemoryRegionSection
*mem_sections
;
72 MemoryRegionSection
*tmp_sections
;
73 struct vhost_virtqueue
*vqs
;
75 /* the first virtqueue which would be used by this vhost dev */
77 /* one past the last vq index for the virtio device (not vhost) */
79 /* if non-zero, minimum required value for max_queues */
82 uint64_t acked_features
;
83 uint64_t backend_features
;
84 uint64_t protocol_features
;
90 Error
*migration_blocker
;
91 const VhostOps
*vhost_ops
;
93 struct vhost_log
*log
;
94 QLIST_ENTRY(vhost_dev
) entry
;
95 QLIST_HEAD(, vhost_iommu
) iommu_list
;
97 const VhostDevConfigOps
*config_ops
;
100 extern const VhostOps kernel_ops
;
101 extern const VhostOps user_ops
;
102 extern const VhostOps vdpa_ops
;
105 struct vhost_dev dev
;
106 struct vhost_virtqueue vqs
[2];
111 int vhost_dev_init(struct vhost_dev
*hdev
, void *opaque
,
112 VhostBackendType backend_type
,
113 uint32_t busyloop_timeout
, Error
**errp
);
114 void vhost_dev_cleanup(struct vhost_dev
*hdev
);
115 int vhost_dev_start(struct vhost_dev
*hdev
, VirtIODevice
*vdev
);
116 void vhost_dev_stop(struct vhost_dev
*hdev
, VirtIODevice
*vdev
);
117 int vhost_dev_enable_notifiers(struct vhost_dev
*hdev
, VirtIODevice
*vdev
);
118 void vhost_dev_disable_notifiers(struct vhost_dev
*hdev
, VirtIODevice
*vdev
);
120 /* Test and clear masked event pending status.
121 * Should be called after unmask to avoid losing events.
123 bool vhost_virtqueue_pending(struct vhost_dev
*hdev
, int n
);
125 /* Mask/unmask events from this vq.
127 void vhost_virtqueue_mask(struct vhost_dev
*hdev
, VirtIODevice
*vdev
, int n
,
129 uint64_t vhost_get_features(struct vhost_dev
*hdev
, const int *feature_bits
,
131 void vhost_ack_features(struct vhost_dev
*hdev
, const int *feature_bits
,
/* Whether another vhost device can still be accommodated. */
bool vhost_has_free_slot(void);

/* Point the backend's ring at the file described by @file. */
int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file);

/* Resolve an IOTLB miss reported by the backend for @iova. */
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);
139 int vhost_dev_get_config(struct vhost_dev
*hdev
, uint8_t *config
,
140 uint32_t config_len
, Error
**errp
);
141 int vhost_dev_set_config(struct vhost_dev
*dev
, const uint8_t *data
,
142 uint32_t offset
, uint32_t size
, uint32_t flags
);
143 /* notifier callback in case vhost device config space changed
145 void vhost_dev_set_config_notifier(struct vhost_dev
*dev
,
146 const VhostDevConfigOps
*ops
);
148 void vhost_dev_reset_inflight(struct vhost_inflight
*inflight
);
149 void vhost_dev_free_inflight(struct vhost_inflight
*inflight
);
150 void vhost_dev_save_inflight(struct vhost_inflight
*inflight
, QEMUFile
*f
);
151 int vhost_dev_load_inflight(struct vhost_inflight
*inflight
, QEMUFile
*f
);
152 int vhost_dev_prepare_inflight(struct vhost_dev
*hdev
, VirtIODevice
*vdev
);
153 int vhost_dev_set_inflight(struct vhost_dev
*dev
,
154 struct vhost_inflight
*inflight
);
155 int vhost_dev_get_inflight(struct vhost_dev
*dev
, uint16_t queue_size
,
156 struct vhost_inflight
*inflight
);