/*
 * Extraction provenance (gitweb residue, kept for reference):
 *   pcihp: acpi: ignore coldplugged bridges when composing hotpluggable slots
 *   qemu.git / hw / virtio / vdpa-dev.c
 *   blob 01b41eb0f123ab3341e7fddef2a32fbf7fe2d977
 */
/*
 * Vhost Vdpa Device
 *
 * Copyright (c) Huawei Technologies Co., Ltd. 2022. All Rights Reserved.
 *
 * Authors:
 *   Longpeng <longpeng2@huawei.com>
 *
 * Largely based on the "vhost-user-blk-pci.c" and "vhost-user-blk.c"
 * implemented by:
 *   Changpeng Liu <changpeng.liu@intel.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/vdpa-dev.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
/*
 * Virtqueue kick handler stub.  The vhost-vdpa backend services the
 * queues, so QEMU itself never has work to do when the guest kicks.
 */
static void
vhost_vdpa_device_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Nothing to do */
}
39 static uint32_t
40 vhost_vdpa_device_get_u32(int fd, unsigned long int cmd, Error **errp)
42 uint32_t val = (uint32_t)-1;
44 if (ioctl(fd, cmd, &val) < 0) {
45 error_setg(errp, "vhost-vdpa-device: cmd 0x%lx failed: %s",
46 cmd, strerror(errno));
49 return val;
/*
 * Realize the generic vhost-vdpa device.
 *
 * Opens the character device named by the "vhostdev" property, queries
 * the backend for its virtio device id, queue limits and config-space
 * size, initializes the vhost layer and finally creates the virtqueues.
 * All errors are reported through @errp; partially-acquired resources
 * are released via the goto chain at the bottom (later labels fall
 * through into earlier ones, so ordering there is significant).
 */
static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VhostVdpaDevice *v = VHOST_VDPA_DEVICE(vdev);
    struct vhost_vdpa_iova_range iova_range;
    uint16_t max_queue_size;
    struct vhost_virtqueue *vqs;
    int i, ret;

    /* The "vhostdev" property (path to the vhost-vdpa chardev) is mandatory. */
    if (!v->vhostdev) {
        error_setg(errp, "vhost-vdpa-device: vhostdev are missing");
        return;
    }

    v->vhostfd = qemu_open(v->vhostdev, O_RDWR, errp);
    if (*errp) {
        return;
    }
    v->vdpa.device_fd = v->vhostfd;

    /* Which virtio device class does the backend implement? */
    v->vdev_id = vhost_vdpa_device_get_u32(v->vhostfd,
                                           VHOST_VDPA_GET_DEVICE_ID, errp);
    if (*errp) {
        goto out;
    }

    max_queue_size = vhost_vdpa_device_get_u32(v->vhostfd,
                                               VHOST_VDPA_GET_VRING_NUM, errp);
    if (*errp) {
        goto out;
    }

    if (v->queue_size > max_queue_size) {
        error_setg(errp, "vhost-vdpa-device: invalid queue_size: %u (max:%u)",
                   v->queue_size, max_queue_size);
        goto out;
    } else if (!v->queue_size) {
        /* queue-size=0 (the default) means "use the backend's maximum". */
        v->queue_size = max_queue_size;
    }

    v->num_queues = vhost_vdpa_device_get_u32(v->vhostfd,
                                              VHOST_VDPA_GET_VQS_COUNT, errp);
    if (*errp) {
        goto out;
    }

    if (!v->num_queues || v->num_queues > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "invalid number of virtqueues: %u (max:%u)",
                   v->num_queues, VIRTIO_QUEUE_MAX);
        goto out;
    }

    v->dev.nvqs = v->num_queues;
    vqs = g_new0(struct vhost_virtqueue, v->dev.nvqs);
    v->dev.vqs = vqs;
    v->dev.vq_index = 0;
    v->dev.vq_index_end = v->dev.nvqs;
    v->dev.backend_features = 0;
    v->started = false;

    ret = vhost_vdpa_get_iova_range(v->vhostfd, &iova_range);
    if (ret < 0) {
        error_setg(errp, "vhost-vdpa-device: get iova range failed: %s",
                   strerror(-ret));
        goto free_vqs;
    }
    v->vdpa.iova_range = iova_range;

    ret = vhost_dev_init(&v->dev, &v->vdpa, VHOST_BACKEND_TYPE_VDPA, 0, NULL);
    if (ret < 0) {
        error_setg(errp, "vhost-vdpa-device: vhost initialization failed: %s",
                   strerror(-ret));
        goto free_vqs;
    }

    v->config_size = vhost_vdpa_device_get_u32(v->vhostfd,
                                               VHOST_VDPA_GET_CONFIG_SIZE,
                                               errp);
    if (*errp) {
        goto vhost_cleanup;
    }

    /*
     * Invoke .post_init() to initialize the transport-specific fields
     * before calling virtio_init().
     */
    if (v->post_init && v->post_init(v, errp) < 0) {
        goto vhost_cleanup;
    }

    /* Shadow copy of the device config space, refreshed from the backend. */
    v->config = g_malloc0(v->config_size);

    ret = vhost_dev_get_config(&v->dev, v->config, v->config_size, NULL);
    if (ret < 0) {
        error_setg(errp, "vhost-vdpa-device: get config failed");
        goto free_config;
    }

    virtio_init(vdev, v->vdev_id, v->config_size);

    /* Queue handler is a stub: the backend drives the virtqueues. */
    v->virtqs = g_new0(VirtQueue *, v->dev.nvqs);
    for (i = 0; i < v->dev.nvqs; i++) {
        v->virtqs[i] = virtio_add_queue(vdev, v->queue_size,
                                        vhost_vdpa_device_dummy_handle_output);
    }

    return;

free_config:
    g_free(v->config);
vhost_cleanup:
    vhost_dev_cleanup(&v->dev);
free_vqs:
    g_free(vqs);
out:
    qemu_close(v->vhostfd);
    v->vhostfd = -1;
}
171 static void vhost_vdpa_device_unrealize(DeviceState *dev)
173 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
174 VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
175 int i;
177 virtio_set_status(vdev, 0);
179 for (i = 0; i < s->num_queues; i++) {
180 virtio_delete_queue(s->virtqs[i]);
182 g_free(s->virtqs);
183 virtio_cleanup(vdev);
185 g_free(s->config);
186 g_free(s->dev.vqs);
187 vhost_dev_cleanup(&s->dev);
188 qemu_close(s->vhostfd);
189 s->vhostfd = -1;
192 static void
193 vhost_vdpa_device_get_config(VirtIODevice *vdev, uint8_t *config)
195 VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
197 memcpy(config, s->config, s->config_size);
200 static void
201 vhost_vdpa_device_set_config(VirtIODevice *vdev, const uint8_t *config)
203 VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
204 int ret;
206 ret = vhost_dev_set_config(&s->dev, s->config, 0, s->config_size,
207 VHOST_SET_CONFIG_TYPE_MASTER);
208 if (ret) {
209 error_report("set device config space failed");
210 return;
214 static uint64_t vhost_vdpa_device_get_features(VirtIODevice *vdev,
215 uint64_t features,
216 Error **errp)
218 VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
219 uint64_t backend_features = s->dev.features;
221 if (!virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM)) {
222 virtio_clear_feature(&backend_features, VIRTIO_F_IOMMU_PLATFORM);
225 return backend_features;
228 static int vhost_vdpa_device_start(VirtIODevice *vdev, Error **errp)
230 VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
231 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
232 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
233 int i, ret;
235 if (!k->set_guest_notifiers) {
236 error_setg(errp, "binding does not support guest notifiers");
237 return -ENOSYS;
240 ret = vhost_dev_enable_notifiers(&s->dev, vdev);
241 if (ret < 0) {
242 error_setg_errno(errp, -ret, "Error enabling host notifiers");
243 return ret;
246 ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, true);
247 if (ret < 0) {
248 error_setg_errno(errp, -ret, "Error binding guest notifier");
249 goto err_host_notifiers;
252 s->dev.acked_features = vdev->guest_features;
254 ret = vhost_dev_start(&s->dev, vdev, false);
255 if (ret < 0) {
256 error_setg_errno(errp, -ret, "Error starting vhost");
257 goto err_guest_notifiers;
259 s->started = true;
262 * guest_notifier_mask/pending not used yet, so just unmask
263 * everything here. virtio-pci will do the right thing by
264 * enabling/disabling irqfd.
266 for (i = 0; i < s->dev.nvqs; i++) {
267 vhost_virtqueue_mask(&s->dev, vdev, i, false);
270 return ret;
272 err_guest_notifiers:
273 k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
274 err_host_notifiers:
275 vhost_dev_disable_notifiers(&s->dev, vdev);
276 return ret;
279 static void vhost_vdpa_device_stop(VirtIODevice *vdev)
281 VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
282 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
283 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
284 int ret;
286 if (!s->started) {
287 return;
289 s->started = false;
291 if (!k->set_guest_notifiers) {
292 return;
295 vhost_dev_stop(&s->dev, vdev, false);
297 ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
298 if (ret < 0) {
299 error_report("vhost guest notifier cleanup failed: %d", ret);
300 return;
303 vhost_dev_disable_notifiers(&s->dev, vdev);
306 static void vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status)
308 VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev);
309 bool should_start = virtio_device_started(vdev, status);
310 Error *local_err = NULL;
311 int ret;
313 if (!vdev->vm_running) {
314 should_start = false;
317 if (s->started == should_start) {
318 return;
321 if (should_start) {
322 ret = vhost_vdpa_device_start(vdev, &local_err);
323 if (ret < 0) {
324 error_reportf_err(local_err, "vhost-vdpa-device: start failed: ");
326 } else {
327 vhost_vdpa_device_stop(vdev);
/* User-configurable properties of the generic vhost-vdpa device. */
static Property vhost_vdpa_device_properties[] = {
    /* Path to the vhost-vdpa character device (mandatory; checked in realize). */
    DEFINE_PROP_STRING("vhostdev", VhostVdpaDevice, vhostdev),
    /* Requested virtqueue size; 0 (default) means "use the backend maximum". */
    DEFINE_PROP_UINT16("queue-size", VhostVdpaDevice, queue_size, 0),
    DEFINE_PROP_END_OF_LIST(),
};
/* Migration state; .unmigratable = 1 blocks migration while this device exists. */
static const VMStateDescription vmstate_vhost_vdpa_device = {
    .name = "vhost-vdpa-device",
    .unmigratable = 1,
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
348 static void vhost_vdpa_device_class_init(ObjectClass *klass, void *data)
350 DeviceClass *dc = DEVICE_CLASS(klass);
351 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
353 device_class_set_props(dc, vhost_vdpa_device_properties);
354 dc->desc = "VDPA-based generic device assignment";
355 dc->vmsd = &vmstate_vhost_vdpa_device;
356 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
357 vdc->realize = vhost_vdpa_device_realize;
358 vdc->unrealize = vhost_vdpa_device_unrealize;
359 vdc->get_config = vhost_vdpa_device_get_config;
360 vdc->set_config = vhost_vdpa_device_set_config;
361 vdc->get_features = vhost_vdpa_device_get_features;
362 vdc->set_status = vhost_vdpa_device_set_status;
365 static void vhost_vdpa_device_instance_init(Object *obj)
367 VhostVdpaDevice *s = VHOST_VDPA_DEVICE(obj);
369 device_add_bootindex_property(obj, &s->bootindex, "bootindex",
370 NULL, DEVICE(obj));
/* QOM type registration record for the generic vhost-vdpa device. */
static const TypeInfo vhost_vdpa_device_info = {
    .name = TYPE_VHOST_VDPA_DEVICE,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VhostVdpaDevice),
    .class_init = vhost_vdpa_device_class_init,
    .instance_init = vhost_vdpa_device_instance_init,
};
/* Register the type with QOM at module-init time. */
static void register_vhost_vdpa_device_type(void)
{
    type_register_static(&vhost_vdpa_device_info);
}

type_init(register_vhost_vdpa_device_type);