/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"

/* TODO: need to add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_STATUS,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1
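
/*
 * As used below: data virtqueues stay in the default VHOST_VDPA_GUEST_PA_ASID
 * address space, which is mapped with guest physical addresses. When the
 * device can isolate the control virtqueue in its own virtqueue group,
 * vhost_vdpa_net_cvq_start() moves that group to VHOST_VDPA_NET_CVQ_ASID so
 * the shadow control buffers can be mapped without touching the guest's
 * memory mappings.
 */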

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}
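
/*
 * For reference, assuming the usual QEMU values MAC_TABLE_ENTRIES == 64 and
 * ETH_ALEN == 6, the worst case above is 2 (ctrl hdr) + 2 * 4 (entry counts
 * of the unicast and multicast MAC tables) + 64 * 6 (MAC entries) = 394
 * bytes, which vhost_vdpa_net_cvq_cmd_page_len() rounds up to one host page.
 */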

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}
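
/*
 * In current virtio headers the transport feature bits masked out above span
 * VIRTIO_TRANSPORT_F_START..VIRTIO_TRANSPORT_F_END - 1 (bits 28..37), e.g.
 * VIRTIO_F_VERSION_1 and VIRTIO_F_RING_PACKED.
 */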

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;

err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not cleanup anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vqs and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to configure or not SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}

static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}
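
/*
 * Net effect of the notifier above: entering migration setup restarts the
 * vhost-net backend with shadow virtqueues enabled, so QEMU can track guest
 * memory writes (the VHOST_F_LOG_ALL capability that SVQ emulates), and a
 * failed migration restarts it once more with SVQ disabled.
 */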

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    add_migration_state_change_notifier(&s->migration_state);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        remove_migration_state_change_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_data_start,
        .stop = vhost_vdpa_net_client_stop,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
        .check_peer_type = vhost_vdpa_check_peer_type,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }

    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}
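
/*
 * Note on sizes above: a DMAMap describes an inclusive [iova, iova + size]
 * range, so the tree allocation uses size - 1 while the device mapping uses
 * the full page length; vhost_vdpa_cvq_unmap_buf() mirrors this by unmapping
 * map->size + 1 bytes.
 */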

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we early return in these cases SVQ will not be enabled. The
     * migration will be blocked as long as the vhost-vdpa backend does not
     * offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues. Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them. Guest's translations are
         *   still validated with virtio virtqueue_pop so there is no risk for
         *   the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls by
     * itself, when BQL is released.
     */
    return vhost_svq_poll(svq);
}

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}
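
/*
 * Resulting layout of the shadow buffers for a command built above:
 *
 *   cvq_cmd_out_buffer: [ virtio_net_ctrl_hdr | data (data_size bytes) ]
 *   status (in buffer): [ virtio_net_ctrl_ack ]
 */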

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                  n->mac, sizeof(n->mac));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
                                          sizeof(mq));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &offloads, sizeof(offloads));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }
    r = vhost_vdpa_net_load_offloads(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd The vdpa device fd
 * @features Features offered by the device.
 * @cvq_index The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER |
                     VIRTIO_CONFIG_S_FEATURES_OK;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set features");
        goto out;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}
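
/*
 * Summary of the probe above: with FEATURES_OK set, read the CVQ's virtqueue
 * group and compare it against the group of every data virtqueue. The CVQ
 * counts as isolated only if no data virtqueue shares its group, since only
 * then can the group be moved to a separate ASID.
 */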

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    int cvq_isolated = 0;

    assert(name);

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = vdpa_net_migration_state_notifier;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;

        /*
         * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
         * there is no way to set the device state (MAC, MQ, etc) before
         * starting the datapath.
         *
         * Migration blocker ownership now belongs to s->vhost_vdpa.
         */
        if (!svq) {
            error_setg(&s->vhost_vdpa.migration_blocker,
                       "net vdpa cannot migrate with CVQ feature");
        }
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }

    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return -errno;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}
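
/*
 * Illustrative command line that reaches net_init_vhost_vdpa() below (the
 * vdpa device node path depends on the host):
 *
 *   -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0
 *   -device virtio-net-pci,netdev=vdpa0
 */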
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);
    return -1;
}