migration/rdma: Return -1 instead of negative errno code
[qemu/armbru.git] / hw / virtio / vhost-user-scmi.c
blob918bb7dcf7d848d5bb962be4ace574fabce43366
/*
 * Vhost-user SCMI virtio device
 *
 * SPDX-FileCopyrightText: Red Hat, Inc.
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Implementation based on other vhost-user devices in QEMU.
 */
10 #include "qemu/osdep.h"
11 #include "qapi/error.h"
12 #include "qemu/error-report.h"
13 #include "hw/virtio/virtio-bus.h"
14 #include "hw/virtio/vhost-user-scmi.h"
15 #include "standard-headers/linux/virtio_ids.h"
16 #include "standard-headers/linux/virtio_scmi.h"
17 #include "trace.h"
/*
 * In this version, we don't support VIRTIO_SCMI_F_SHARED_MEMORY.
 * Note that VIRTIO_SCMI_F_SHARED_MEMORY is currently not supported in
 * the Linux VirtIO SCMI guest driver.
 */
24 static const int feature_bits[] = {
25 VIRTIO_F_VERSION_1,
26 VIRTIO_F_NOTIFY_ON_EMPTY,
27 VIRTIO_RING_F_INDIRECT_DESC,
28 VIRTIO_RING_F_EVENT_IDX,
29 VIRTIO_F_RING_RESET,
30 VIRTIO_SCMI_F_P2A_CHANNELS,
31 VHOST_INVALID_FEATURE_BIT
34 static int vu_scmi_start(VirtIODevice *vdev)
36 VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
37 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
38 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
39 struct vhost_dev *vhost_dev = &scmi->vhost_dev;
40 int ret, i;
42 if (!k->set_guest_notifiers) {
43 error_report("binding does not support guest notifiers");
44 return -ENOSYS;
47 ret = vhost_dev_enable_notifiers(vhost_dev, vdev);
48 if (ret < 0) {
49 error_report("Error enabling host notifiers: %d", ret);
50 return ret;
53 ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, true);
54 if (ret < 0) {
55 error_report("Error binding guest notifier: %d", ret);
56 goto err_host_notifiers;
59 vhost_ack_features(&scmi->vhost_dev, feature_bits, vdev->guest_features);
61 ret = vhost_dev_start(&scmi->vhost_dev, vdev, true);
62 if (ret < 0) {
63 error_report("Error starting vhost-user-scmi: %d", ret);
64 goto err_guest_notifiers;
66 scmi->started_vu = true;
69 * guest_notifier_mask/pending not used yet, so just unmask
70 * everything here. virtio-pci will do the right thing by
71 * enabling/disabling irqfd.
73 for (i = 0; i < scmi->vhost_dev.nvqs; i++) {
74 vhost_virtqueue_mask(&scmi->vhost_dev, vdev, i, false);
76 return 0;
78 err_guest_notifiers:
79 k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
80 err_host_notifiers:
81 vhost_dev_disable_notifiers(vhost_dev, vdev);
83 return ret;
86 static void vu_scmi_stop(VirtIODevice *vdev)
88 VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
89 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
90 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
91 struct vhost_dev *vhost_dev = &scmi->vhost_dev;
92 int ret;
94 /* vhost_dev_is_started() check in the callers is not fully reliable. */
95 if (!scmi->started_vu) {
96 return;
98 scmi->started_vu = false;
100 if (!k->set_guest_notifiers) {
101 return;
104 vhost_dev_stop(vhost_dev, vdev, true);
106 ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
107 if (ret < 0) {
108 error_report("vhost guest notifier cleanup failed: %d", ret);
109 return;
111 vhost_dev_disable_notifiers(vhost_dev, vdev);
114 static void vu_scmi_set_status(VirtIODevice *vdev, uint8_t status)
116 VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
117 bool should_start = virtio_device_should_start(vdev, status);
119 if (!scmi->connected) {
120 return;
122 if (vhost_dev_is_started(&scmi->vhost_dev) == should_start) {
123 return;
126 if (should_start) {
127 vu_scmi_start(vdev);
128 } else {
129 vu_scmi_stop(vdev);
133 static uint64_t vu_scmi_get_features(VirtIODevice *vdev, uint64_t features,
134 Error **errp)
136 VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
138 return vhost_get_features(&scmi->vhost_dev, feature_bits, features);
141 static void vu_scmi_handle_output(VirtIODevice *vdev, VirtQueue *vq)
144 * Not normally called; it's the daemon that handles the queue;
145 * however virtio's cleanup path can call this.
149 static void vu_scmi_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
151 VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
153 if (idx == VIRTIO_CONFIG_IRQ_IDX) {
154 return;
157 vhost_virtqueue_mask(&scmi->vhost_dev, vdev, idx, mask);
160 static bool vu_scmi_guest_notifier_pending(VirtIODevice *vdev, int idx)
162 VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
164 return vhost_virtqueue_pending(&scmi->vhost_dev, idx);
167 static void vu_scmi_connect(DeviceState *dev)
169 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
170 VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
172 if (scmi->connected) {
173 return;
175 scmi->connected = true;
177 /* restore vhost state */
178 if (virtio_device_started(vdev, vdev->status)) {
179 vu_scmi_start(vdev);
183 static void vu_scmi_disconnect(DeviceState *dev)
185 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
186 VHostUserSCMI *scmi = VHOST_USER_SCMI(vdev);
188 if (!scmi->connected) {
189 return;
191 scmi->connected = false;
193 if (vhost_dev_is_started(&scmi->vhost_dev)) {
194 vu_scmi_stop(vdev);
198 static void vu_scmi_event(void *opaque, QEMUChrEvent event)
200 DeviceState *dev = opaque;
202 switch (event) {
203 case CHR_EVENT_OPENED:
204 vu_scmi_connect(dev);
205 break;
206 case CHR_EVENT_CLOSED:
207 vu_scmi_disconnect(dev);
208 break;
209 case CHR_EVENT_BREAK:
210 case CHR_EVENT_MUX_IN:
211 case CHR_EVENT_MUX_OUT:
212 /* Ignore */
213 break;
217 static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserSCMI *scmi)
219 virtio_delete_queue(scmi->cmd_vq);
220 virtio_delete_queue(scmi->event_vq);
221 g_free(scmi->vhost_dev.vqs);
222 virtio_cleanup(vdev);
223 vhost_user_cleanup(&scmi->vhost_user);
226 static void vu_scmi_device_realize(DeviceState *dev, Error **errp)
228 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
229 VHostUserSCMI *scmi = VHOST_USER_SCMI(dev);
230 int ret;
232 if (!scmi->chardev.chr) {
233 error_setg(errp, "vhost-user-scmi: chardev is mandatory");
234 return;
237 vdev->host_features |= (1ULL << VIRTIO_SCMI_F_P2A_CHANNELS);
239 if (!vhost_user_init(&scmi->vhost_user, &scmi->chardev, errp)) {
240 return;
243 virtio_init(vdev, VIRTIO_ID_SCMI, 0);
245 scmi->cmd_vq = virtio_add_queue(vdev, 256, vu_scmi_handle_output);
246 scmi->event_vq = virtio_add_queue(vdev, 256, vu_scmi_handle_output);
247 scmi->vhost_dev.nvqs = 2;
248 scmi->vhost_dev.vqs = g_new0(struct vhost_virtqueue, scmi->vhost_dev.nvqs);
250 ret = vhost_dev_init(&scmi->vhost_dev, &scmi->vhost_user,
251 VHOST_BACKEND_TYPE_USER, 0, errp);
252 if (ret < 0) {
253 error_setg_errno(errp, -ret,
254 "vhost-user-scmi: vhost_dev_init() failed");
255 do_vhost_user_cleanup(vdev, scmi);
256 return;
259 qemu_chr_fe_set_handlers(&scmi->chardev, NULL, NULL, vu_scmi_event, NULL,
260 dev, NULL, true);
262 return;
265 static void vu_scmi_device_unrealize(DeviceState *dev)
267 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
268 VHostUserSCMI *scmi = VHOST_USER_SCMI(dev);
270 vu_scmi_set_status(vdev, 0);
271 vhost_dev_cleanup(&scmi->vhost_dev);
272 do_vhost_user_cleanup(vdev, scmi);
275 static const VMStateDescription vu_scmi_vmstate = {
276 .name = "vhost-user-scmi",
277 .unmigratable = 1,
280 static Property vu_scmi_properties[] = {
281 DEFINE_PROP_CHR("chardev", VHostUserSCMI, chardev),
282 DEFINE_PROP_END_OF_LIST(),
285 static void vu_scmi_class_init(ObjectClass *klass, void *data)
287 DeviceClass *dc = DEVICE_CLASS(klass);
288 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
290 device_class_set_props(dc, vu_scmi_properties);
291 dc->vmsd = &vu_scmi_vmstate;
292 set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
293 vdc->realize = vu_scmi_device_realize;
294 vdc->unrealize = vu_scmi_device_unrealize;
295 vdc->get_features = vu_scmi_get_features;
296 vdc->set_status = vu_scmi_set_status;
297 vdc->guest_notifier_mask = vu_scmi_guest_notifier_mask;
298 vdc->guest_notifier_pending = vu_scmi_guest_notifier_pending;
301 static const TypeInfo vu_scmi_info = {
302 .name = TYPE_VHOST_USER_SCMI,
303 .parent = TYPE_VIRTIO_DEVICE,
304 .instance_size = sizeof(VHostUserSCMI),
305 .class_init = vu_scmi_class_init,
308 static void vu_scmi_register_types(void)
310 type_register_static(&vu_scmi_info);
313 type_init(vu_scmi_register_types)