/*
 * vhost-vdpa
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /* vhost-vDPA doesn't allow MMIO to be mapped */
           memory_region_is_ram_device(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space. These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware. TODO: VDPA should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}
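
/*
 * Map [iova, iova + size) to the host VA @vaddr by writing a
 * VHOST_IOTLB_UPDATE message to the vhost-vdpa device fd.
 */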
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}
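
/*
 * Drop the mapping for [iova, iova + size) with a VHOST_IOTLB_INVALIDATE
 * message on the same fd.
 */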
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}
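
/*
 * If the backend advertises VHOST_BACKEND_F_IOTLB_BATCH, bracket the
 * map/unmap requests of a listener transaction with BATCH_BEGIN and
 * BATCH_END so the kernel can apply them as one batch.
 */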
static void vhost_vdpa_listener_begin(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}
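
/*
 * Translate a newly added RAM section into a page-aligned IOVA range and
 * map it with vhost_vdpa_dma_map(); the host virtual address is taken
 * directly from the section's RAM block.
 */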
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    /*
     * Clamp the section to page-aligned bounds: round the start up and
     * the end down, and skip sections that do not cover a full page.
     */
    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost-vdpa: DMA map failed");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}
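
/* Mirror of region_add: recompute the same aligned range and unmap it. */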
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost-vdpa: DMA unmap failed");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping, so we cannot use the generic vhost memory listener, which
 * depends on addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .begin = vhost_vdpa_listener_begin,
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    return ioctl(fd, request, arg);
}
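
/*
 * Read-modify-write helper for the device status byte: OR @status into
 * whatever the device currently reports.
 */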
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}
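
/*
 * Host notifiers are per-queue doorbell pages mmap()ed from the device fd
 * and exposed to the guest as ram-device memory regions, so that virtqueue
 * kicks can reach the device without bouncing through QEMU.
 */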
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -1;
    }

    return 0;
}
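
/*
 * Feature negotiation follows the virtio handshake: set the features,
 * raise FEATURES_OK in the status byte, then read the status back to
 * check that the device accepted them.
 */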
static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    uint8_t status = 0;
    int ret;

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}
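
/*
 * Negotiate backend (not virtio) features: advertise only IOTLB message
 * v2 and IOTLB batching, and remember what the kernel accepted in
 * dev->backend_cap so the listener knows whether batching is available.
 */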
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    /* Writing a zero status resets the device, per the virtio spec. */
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
    return idx - dev->vq_index;
}

static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;

    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}
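
/*
 * Start: register the memory listener (which replays the current memory
 * map as DMA mappings), wire up host notifiers, enable the rings and set
 * DRIVER_OK, then read the status back to confirm. Stop: reset the device
 * and drop back to ACKNOWLEDGE | DRIVER.
 */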
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;

    trace_vhost_vdpa_dev_start(dev, started);
    if (started) {
        uint8_t status = 0;

        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}
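
/*
 * Backend ops table registered for VHOST_BACKEND_TYPE_VDPA; entries left
 * NULL are operations this backend does not implement.
 */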
const VhostOps vdpa_ops = {
    .backend_type = VHOST_BACKEND_TYPE_VDPA,
    .vhost_backend_init = vhost_vdpa_init,
    .vhost_backend_cleanup = vhost_vdpa_cleanup,
    .vhost_set_log_base = vhost_vdpa_set_log_base,
    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
    .vhost_get_features = vhost_vdpa_get_features,
    .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
    .vhost_set_owner = vhost_vdpa_set_owner,
    .vhost_set_vring_endian = NULL,
    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
    .vhost_set_features = vhost_vdpa_set_features,
    .vhost_reset_device = vhost_vdpa_reset_device,
    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
    .vhost_get_config = vhost_vdpa_get_config,
    .vhost_set_config = vhost_vdpa_set_config,
    .vhost_requires_shm_log = NULL,
    .vhost_migration_done = NULL,
    .vhost_backend_can_merge = NULL,
    .vhost_net_set_mtu = NULL,
    .vhost_set_iotlb_callback = NULL,
    .vhost_send_device_iotlb_msg = NULL,
    .vhost_dev_start = vhost_vdpa_dev_start,
    .vhost_get_device_id = vhost_vdpa_get_device_id,
    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
    .vhost_force_iommu = vhost_vdpa_force_iommu,
};