vhost-vdpa: Do not send empty IOTLB update batches
[qemu/kevin.git] / hw/virtio/vhost-vdpa.c

/*
 * vhost-vdpa
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /* vhost-vDPA doesn't allow MMIO to be mapped */
           memory_region_is_ram_device(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space. These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware. TODO: VDPA should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

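/*
 * Send a single VHOST_IOTLB_UPDATE message over the vhost-vdpa device
 * fd, asking the kernel to map [iova, iova + size) to the host virtual
 * address vaddr with read-only or read-write permission.
 */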
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

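/*
 * Send a VHOST_IOTLB_INVALIDATE message to tear down the device
 * mapping for [iova, iova + size).
 */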
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

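/*
 * Unconditionally send VHOST_IOTLB_BATCH_BEGIN, opening a batch so the
 * kernel can apply the following updates/invalidates as one group.
 */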
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

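/*
 * Open the batch lazily, right before the first map/unmap of a
 * listener transaction. A commit that carries no IOTLB updates thus
 * never produces an empty BATCH_BEGIN/BATCH_END pair on the device fd.
 */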
static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

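/*
 * Close the batch at .commit time. VHOST_IOTLB_BATCH_END is only sent
 * when the backend advertises VHOST_BACKEND_F_IOTLB_BATCH and a
 * BATCH_BEGIN actually went out for this transaction.
 */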
static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}

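/*
 * Map a new RAM section into the device IOTLB: page-align the section,
 * translate its guest address range to a host virtual address, and
 * forward it with vhost_vdpa_dma_map().
 */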
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}

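/*
 * Remove a section from the device IOTLB, mirroring the alignment
 * logic of region_add so the invalidated range matches the one that
 * was mapped.
 */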
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    memory_region_unref(section->mr);
}

/*
 * IOTLB API is used by vhost-vdpa which requires incremental updating
 * of the mapping. So we cannot use the generic vhost memory listener,
 * which depends on the addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

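/*
 * Thin ioctl() wrapper over the vhost-vdpa character device; converts
 * the -1/errno convention into a negative errno return value.
 */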
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

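/*
 * OR new bits into the virtio device status: read the current status,
 * set the requested bits and write the result back. A failure to read
 * the status is silently ignored here.
 */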
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

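/*
 * Map the host notifier (doorbell) area of a queue into QEMU and wire
 * it up as a host-notifier memory region, so that guest kicks can
 * reach the device doorbell directly. The mmap offset follows the
 * vhost-vdpa convention of one page per queue index.
 */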
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }

    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -1;
    }

    return 0;
}

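/*
 * Feature negotiation: push the acked features to the device, then set
 * FEATURES_OK and read the status back to confirm the device accepted
 * them, as the virtio 1.x handshake requires.
 */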
static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    uint8_t status = 0;
    int ret;

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

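/*
 * Negotiate backend (IOTLB) capabilities. Only IOTLB message v2 and
 * IOTLB batching are requested; whatever subset the kernel grants is
 * cached in dev->backend_cap for the memory listener to consult.
 */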
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
    return idx - dev->vq_index;
}

static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

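/*
 * Start/stop the device. Starting registers the memory listener (which
 * replays the existing RAM sections as IOTLB updates), enables the
 * rings and sets DRIVER_OK; stopping resets the device and returns the
 * status to ACKNOWLEDGE | DRIVER.
 */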
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    trace_vhost_vdpa_dev_start(dev, started);
    if (started) {
        uint8_t status = 0;
        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

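/*
 * Backend callback table for the vDPA vhost backend. Entries left NULL
 * are callbacks this backend does not implement.
 */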
const VhostOps vdpa_ops = {
    .backend_type = VHOST_BACKEND_TYPE_VDPA,
    .vhost_backend_init = vhost_vdpa_init,
    .vhost_backend_cleanup = vhost_vdpa_cleanup,
    .vhost_set_log_base = vhost_vdpa_set_log_base,
    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
    .vhost_get_features = vhost_vdpa_get_features,
    .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
    .vhost_set_owner = vhost_vdpa_set_owner,
    .vhost_set_vring_endian = NULL,
    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
    .vhost_set_features = vhost_vdpa_set_features,
    .vhost_reset_device = vhost_vdpa_reset_device,
    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
    .vhost_get_config = vhost_vdpa_get_config,
    .vhost_set_config = vhost_vdpa_set_config,
    .vhost_requires_shm_log = NULL,
    .vhost_migration_done = NULL,
    .vhost_backend_can_merge = NULL,
    .vhost_net_set_mtu = NULL,
    .vhost_set_iotlb_callback = NULL,
    .vhost_send_device_iotlb_msg = NULL,
    .vhost_dev_start = vhost_vdpa_dev_start,
    .vhost_get_device_id = vhost_vdpa_get_device_id,
    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
    .vhost_force_iommu = vhost_vdpa_force_iommu,
};