/*
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"
/* Todo: need to add the multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    Notifier migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;
} VhostVDPAState;
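
/*
 * Note on the shadow buffers above: cvq_cmd_out_buffer and status are
 * QEMU-owned, page-sized allocations (net_vhost_vdpa_init() below mmap()s
 * them to vhost_vdpa_net_cvq_cmd_page_len() bytes). Control commands are
 * copied from the guest into cvq_cmd_out_buffer before being forwarded,
 * and the device writes its virtio_net_ctrl_ack back into *status, so the
 * device never dereferences guest memory for CVQ traffic.
 */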
/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};
/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);
#define VHOST_VDPA_NET_CVQ_ASID 1
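
/*
 * CVQ gets its own address space ID so that, when it is shadowed, the
 * control buffers QEMU injects are translated independently from the
 * guest's memory mappings: the data virtqueues keep using the default
 * guest-PA ASID (VHOST_VDPA_GUEST_PA_ASID), while the shadow CVQ is
 * switched to VHOST_VDPA_NET_CVQ_ASID in vhost_vdpa_net_cvq_start() below.
 */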
VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}
static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * In buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}
static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}
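
/*
 * A worked example of the sizing above, assuming the usual definitions
 * (struct virtio_net_ctrl_hdr is 2 bytes, struct virtio_net_ctrl_mac is
 * 4 bytes, MAC_TABLE_ENTRIES is 64 and ETH_ALEN is 6; check the headers
 * if these differ):
 *
 *     2 + 2 * 4 + 64 * 6 = 394 bytes
 *
 * which vhost_vdpa_net_cvq_cmd_page_len() rounds up to a single 4 KiB
 * page on hosts with 4 KiB pages.
 */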
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err;
    }
    s->vhost_net = net;

    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err;
    }

    return 0;

err:
    if (net) {
        vhost_net_cleanup(net);
        g_free(net);
    }
    s->vhost_net = NULL;
    return -1;
}
static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not cleanup anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}
static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}
static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}
static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}
/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}
/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vqs and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
          n->max_ncs - n->max_queue_pairs : 0;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to configure or not SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
}
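
/*
 * The stop/start cycle above is what flips SVQ on or off for the data
 * plane: SVQ is what exposes VHOST_F_LOG_ALL (see vdpa_svq_device_features),
 * so entering migration setup switches the device to shadow virtqueues to
 * get dirty page tracking, and a failed migration switches it back.
 */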
static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *migration = data;
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (migration_in_setup(migration)) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (migration_has_failed(migration)) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
}
static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}
static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
        v->shadow_data = true;
    } else {
        v->shadow_vqs_enabled = false;
        v->shadow_data = false;
    }

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}
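
/*
 * Note that only the first queue pair (index 0) registers the migration
 * notifier and allocates the IOVA tree; every other queue pair reuses the
 * tree of the first one, so all shadow virtqueues share a single IOVA
 * allocator per device.
 */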
static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}
static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    } else {
        s->vhost_vdpa.iova_tree = NULL;
    }
}
static NetClientInfo net_vhost_vdpa_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_data_start,
        .load = vhost_vdpa_net_data_load,
        .stop = vhost_vdpa_net_client_stop,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
        .check_peer_type = vhost_vdpa_check_peer_type,
};
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}
/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}
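
/*
 * The map/unmap pair above relies on DMAMap sizes being stored inclusively
 * (an iova-tree convention): the mapping records size - 1 and the unmap
 * path passes map->size + 1 bytes back to vhost_vdpa_dma_unmap().
 */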
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->vhost_vdpa.shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we early return in these cases SVQ will not be enabled. The migration
     * will be blocked as long as vhost-vdpa backends do not offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues. Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - Memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them. Guest's translations are
         *   still validated with virtio virtqueue_pop so there is no risk for
         *   the guest to access memory that it shouldn't.
         *
         * To allocate an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}
static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls by itself,
     * when BQL is released.
     */
    return vhost_svq_poll(svq, 1);
}
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    /* pack the CVQ command header */
    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));

    /* pack the CVQ command command-specific-data */
    iov_to_buf(data_sg, data_num, 0,
               s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);

    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
                                  sizeof(virtio_net_ctrl_ack));
}
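
/*
 * Layout of the out buffer built above, as the device sees it:
 *
 *     +------------------------+--------------------------------+
 *     | virtio_net_ctrl_hdr    | command-specific data          |
 *     | (class, cmd: 1B each)  | (data_size bytes from data_sg) |
 *     +------------------------+--------------------------------+
 *
 * The single-byte virtio_net_ctrl_ack answer comes back through the in
 * buffer that vhost_vdpa_net_cvq_add() points at s->status.
 */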
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                      VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                      &data, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
                                                  VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                                  data, ARRAY_SIZE(data));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}
static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n)
{
    uint64_t offloads;
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                          &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return -EIO;
    }

    return 0;
}
static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
                                   cmd, &data, 1);
}
static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    ssize_t dev_written;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which sets promiscuous mode off, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                                  VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), device turns all-multicast mode
     * off by default.
     *
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                                  VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                                  VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                                  VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                                  VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        dev_written = vhost_vdpa_net_load_rx_mode(s,
                                                  VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (dev_written < 0) {
            return dev_written;
        }
        if (*s->status != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    return 0;
}
static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
                                                  VIRTIO_NET_CTRL_VLAN_ADD,
                                                  &data, 1);
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }
    if (unlikely(*s->status != VIRTIO_NET_OK)) {
        return -EIO;
    }

    return 0;
}
static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        r = vhost_vdpa_net_load_mac(s, n);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}
static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};
/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;

    /* parse the non-multicast MAC address entries from CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode.
     */
    r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
    if (unlikely(r < 0)) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate that QEMU has polled a vdpa device used buffer
     * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
     */
    return sizeof(*s->status);
}
/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }

    return dev_written < 0 ? dev_written : 0;
}
static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};
/**
 * Probe if CVQ is isolated
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device.
 * @cvq_index         The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    ERRP_GUARD();
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not. The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}
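
/*
 * The probe above runs before the device is really started: it walks the
 * device through ACKNOWLEDGE/DRIVER/FEATURES_OK just far enough to query
 * the vring groups, then writes a zero status at the end so the regular
 * initialization can start again from a reset device.
 */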
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = NULL;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}
static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -1;
        }
    } else {
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}