/*
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"
#include "trace.h"
/* Todo: need to add the multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    NotifierWithReturn migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;
} VhostVDPAState;
/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_NOTIFICATION_DATA,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};
/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSS) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1
VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}
static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * In buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}
static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        return -1;
    }
    s->vhost_net = net;

    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        vhost_net_cleanup(net);
        g_free(net);
        s->vhost_net = NULL;
        return -1;
    }

    return 0;
}
static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not cleanup anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_free(s->vhost_vdpa.shared);
}
/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}
static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}
static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;

    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}
static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}
/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}
/** From any vdpa net client, get the netclient of the i-th queue pair */
static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc_i = qemu_get_peer(nic->ncs, i);

    return DO_UPCAST(VhostVDPAState, nc, nc_i);
}
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    return vhost_vdpa_net_get_nc_vdpa(s, 0);
}
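/*
 * Enable or disable SVQ (and with it dirty page logging) on all data
 * virtqueues by restarting the vhost-net backend.  Invoked from the
 * migration state notifier on the first data virtqueue pair when x-svq
 * is not set.
 */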
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vqs and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
          n->max_ncs - n->max_queue_pairs : 0;
    v->shared->svq_switching = enable ?
        SVQ_TSTATE_ENABLING : SVQ_TSTATE_DISABLING;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to configure or not SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
    v->shared->svq_switching = SVQ_TSTATE_DONE;
}
static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                             MigrationEvent *e, Error **errp)
{
    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
                                     migration_state);

    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        vhost_vdpa_net_log_global_enable(s, false);
    }

    return 0;
}
static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
    if (v->shadow_vqs_enabled) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }
}
static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active()) {
        v->shadow_vqs_enabled = true;
    } else {
        v->shadow_vqs_enabled = false;
    }

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    return 0;
}
static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        int ret = vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}
static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
                        vhost_iova_tree_delete);
    }
}
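/* NetClientInfo callbacks used by the vhost-vdpa data queue pair clients */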
static NetClientInfo net_vhost_vdpa_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_data_start,
        .load = vhost_vdpa_net_data_load,
        .stop = vhost_vdpa_net_client_stop,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
        .check_peer_type = vhost_vdpa_check_peer_type,
        .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    trace_vhost_vdpa_set_address_space_id(v, vq_group, asid_num);

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->shared->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}
/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return -ENOMEM;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        vhost_iova_tree_remove(v->shared->iova_tree, map);
    }

    return r;
}
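/*
 * Start the control virtqueue client: decide whether CVQ must be shadowed
 * and whether it can live in its own address space, then map the control
 * command shadow buffers into the device's IOVA space.
 */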
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we early return in these cases SVQ will not be enabled. The migration
     * will be blocked as long as vhost-vdpa backends will not offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    /*
     * If other vhost_vdpa already have an iova_tree, reuse it for simplicity,
     * whether CVQ shares ASID with guest or not, because:
     * - Memory listener needs access to guest's memory addresses allocated
     *   in the IOVA tree.
     * - There should be plenty of IOVA address space for both ASID not to
     *   worry about collisions between them.  Guest's translations are still
     *   validated with virtio virtqueue_pop so there is no risk for the guest
     *   to access memory that it shouldn't.
     *
     * To allocate a iova tree per ASID is doable but it complicates the code
     * and it is not worth it for the moment.
     */
    if (!v->shared->iova_tree) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}
static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                      const struct iovec *out_sg,
                                      size_t out_num,
                                      const struct iovec *in_sg,
                                      size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}
/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * Caller should hold the BQL when invoking this function, and should take
 * the answer before SVQ pulls by itself when BQL is released.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    return vhost_svq_poll(svq, cmds_in_flight);
}
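/*
 * Reset the out and in cursors to the beginning of the shadow buffers, so
 * that a new batch of control commands can be packed for the device.
 */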
static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}
/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * Caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* device uses a one-byte length ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);

    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }

    return 0;
}
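/*
 * Pack one control command into the shadow buffers and queue it to SVQ.
 * If SVQ or the shadow buffers run out of space, the pending batch is
 * flushed first.  On success the cursors are advanced past the command.
 */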
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    trace_vhost_vdpa_net_load_cmd(s, class, cmd, data_num, data_size);
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or control commands shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command command-specific-data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        trace_vhost_vdpa_net_load_cmd_retval(s, class, cmd, r);
        return r;
    }

    /* iterate the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}
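/*
 * Queue the commands that restore the MAC address and, when
 * VIRTIO_NET_F_CTRL_RX is negotiated, the MAC filter table.
 */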
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
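/*
 * Queue the command that restores the RSS configuration (do_rss = true)
 * or the hash calculation configuration (do_rss = false).
 */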
static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to VirtIO standard, "Number of entries in
         * indirection_table is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to VirtIO standard, "Field reserved MUST contain zeroes.
         * It is defined to make the structure to match the layout of
         * virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which corresponds to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with RSS case. Therefore, we need to zero the `table` variable here.
         */
        table[0] = 0;
    }

    /*
     * Considering that virtio_net_handle_rss() currently does not restore
     * the hash key length parsed from the CVQ command sent from the guest
     * into n->rss_data and uses the maximum key length in other code, so
     * we also employ the maximum key length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        },
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
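/*
 * Queue the command that restores the number of enabled queue pairs and,
 * depending on the negotiated features, the RSS or hash-report state.
 */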
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs);

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
        if (unlikely(r < 0)) {
            return r;
        }
    } else if (virtio_vdev_has_feature(&n->parent_obj,
                                       VIRTIO_NET_F_HASH_REPORT)) {
        /* load the hash calculation state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}
static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
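/*
 * Helper to queue a single VIRTIO_NET_CTRL_RX mode command (promiscuous,
 * all-multicast, ...) carrying a one-byte on/off argument.
 */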
static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which sets promiscuous mode off, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns all-multicast mode
     * off by default.
     *
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}
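/* Queue the command that re-adds a single VLAN id to the device filter */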
static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}
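/*
 * Walk the device model's VLAN bitmap and re-add every configured VLAN id,
 * one VIRTIO_NET_CTRL_VLAN_ADD command per id.
 */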
static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    r = vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
    if (unlikely(r < 0)) {
        return r;
    }

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all pending device's used buffers.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        r = vhost_vdpa_set_vring_ready(v, i);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}
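/* NetClientInfo callbacks used by the vhost-vdpa control virtqueue client */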
static NetClientInfo net_vhost_vdpa_cvq_info = {
        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
        .size = sizeof(VhostVDPAState),
        .receive = vhost_vdpa_receive,
        .start = vhost_vdpa_net_cvq_start,
        .load = vhost_vdpa_net_cvq_load,
        .stop = vhost_vdpa_net_cvq_stop,
        .cleanup = vhost_vdpa_cleanup,
        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
        .has_ufo = vhost_vdpa_has_ufo,
        .check_peer_type = vhost_vdpa_check_peer_type,
        .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};
/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulating QEMU poll a vdpa device used buffer
     * for VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}
/*
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for device model */
    const struct iovec model_in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    /* in buffer used for vdpa device */
    const struct iovec vdpa_in = {
        .iov_base = s->status,
        .iov_len = sizeof(*s->status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out,
                                                                  &vdpa_in);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        ssize_t r;
        r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
        if (unlikely(r < 0)) {
            dev_written = r;
            goto out;
        }

        /*
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        dev_written = vhost_vdpa_net_svq_poll(s, 1);
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free the `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}
static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};
/**
 * Probe if CVQ is isolated
 *
 * @device_fd         The vdpa device fd
 * @features          Features offered by the device.
 * @cvq_index         The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    ERRP_GUARD();
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (r) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (r) {
        error_setg_errno(errp, -r, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (r) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot be
         * isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}
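/*
 * Create one vhost-vdpa net client.  The first queue pair allocates the
 * VhostVDPAShared state; every other client reuses the one passed in via
 * @shared.  Data path clients and the CVQ client use different
 * NetClientInfo callback tables.
 */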
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           VhostVDPAShared *shared,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = NULL;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
        s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
        s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
        s->vhost_vdpa.shared->iova_range = iova_range;
        s->vhost_vdpa.shared->shadow_data = svq;
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    if (queue_pair_index != 0) {
        s->vhost_vdpa.shared = shared;
    }

    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }

    return nc;
}
static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}
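/*
 * Entry point for -netdev type=vhost-vdpa: open the vdpa device, query its
 * features and maximum queue pair count, then create one net client per
 * data queue pair plus one for CVQ when the device offers it.
 */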
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -1;
        }
    } else {
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        VhostVDPAShared *shared = NULL;

        if (i) {
            shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
        }
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, shared, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
        VhostVDPAShared *shared = s0->vhost_vdpa.shared;

        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, shared,
                                 errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}