/*
 * Migration support for VFIO devices
 *
 * Copyright NVIDIA, Inc. 2020
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "sysemu/runstate.h"
#include "hw/vfio/vfio-common.h"
#include "migration/migration.h"
#include "migration/vmstate.h"
#include "migration/qemu-file.h"
#include "migration/register.h"
#include "migration/blocker.h"
#include "migration/misc.h"
#include "qapi/error.h"
#include "exec/ramlist.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "trace.h"
/*
 * Flags to be used as unique delimiters for VFIO devices in the migration
 * stream. These flags are composed as:
 * 0xffffffff => MSB 32-bit all 1s
 * 0xef10     => Magic ID, represents emulated (virtual) function IO
 * 0x0000     => 16-bits reserved for flags
 *
 * The beginning of state information is marked by _DEV_CONFIG_STATE,
 * _DEV_SETUP_STATE, or _DEV_DATA_STATE; the end of each piece of state
 * information is marked by _END_OF_STATE.
 */
#define VFIO_MIG_FLAG_END_OF_STATE      (0xffffffffef100001ULL)
#define VFIO_MIG_FLAG_DEV_CONFIG_STATE  (0xffffffffef100002ULL)
#define VFIO_MIG_FLAG_DEV_SETUP_STATE   (0xffffffffef100003ULL)
#define VFIO_MIG_FLAG_DEV_DATA_STATE    (0xffffffffef100004ULL)
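
/*
 * Sketch of one device's stream (added for orientation only; derived from
 * the save handlers below):
 *
 *   _DEV_SETUP_STATE ... _END_OF_STATE               (vfio_save_setup)
 *   _DEV_DATA_STATE <size> <data> _END_OF_STATE      (vfio_save_iterate,
 *                                                     repeated)
 *   _DEV_CONFIG_STATE <config> _END_OF_STATE         (vfio_save_complete_precopy,
 *   _DEV_DATA_STATE <size> <data> ... _END_OF_STATE   which also drains the
 *                                                     remaining device data)
 */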
static int64_t bytes_transferred;
static inline int vfio_mig_access(VFIODevice *vbasedev, void *val, int count,
                                  off_t off, bool iswrite)
{
    int ret;

    ret = iswrite ? pwrite(vbasedev->fd, val, count, off) :
                    pread(vbasedev->fd, val, count, off);
    if (ret < count) {
        error_report("vfio_mig_%s %d byte %s: failed at offset 0x%"
                     HWADDR_PRIx", err: %s", iswrite ? "write" : "read", count,
                     vbasedev->name, off, strerror(errno));
        return (ret < 0) ? ret : -EINVAL;
    }
    return 0;
}
static int vfio_mig_rw(VFIODevice *vbasedev, __u8 *buf, size_t count,
                       off_t off, bool iswrite)
{
    int ret = 0;

    while (count) {
        int bytes = 0;

        if (count >= 8 && !(off % 8)) {
            bytes = 8;
        } else if (count >= 4 && !(off % 4)) {
            bytes = 4;
        } else if (count >= 2 && !(off % 2)) {
            bytes = 2;
        } else {
            bytes = 1;
        }

        ret = vfio_mig_access(vbasedev, buf, bytes, off, iswrite);
        if (ret) {
            return ret;
        }

        count -= bytes;
        buf += bytes;
        off += bytes;
    }

    return 0;
}
#define vfio_mig_read(f, v, c, o)   vfio_mig_rw(f, (__u8 *)v, c, o, false)
#define vfio_mig_write(f, v, c, o)  vfio_mig_rw(f, (__u8 *)v, c, o, true)

#define VFIO_MIG_STRUCT_OFFSET(f)       \
                                 offsetof(struct vfio_device_migration_info, f)
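
/*
 * Usage sketch (illustration only; this is the pattern used by
 * vfio_migration_set_state() and the other helpers below):
 *
 *     uint32_t device_state;
 *     ret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state),
 *                         region->fd_offset +
 *                         VFIO_MIG_STRUCT_OFFSET(device_state));
 */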
/*
 * Change the device_state register for device @vbasedev. Bits set in @mask
 * are preserved, bits set in @value are set, and bits not set in either @mask
 * or @value are cleared in device_state. If the register cannot be accessed,
 * the resulting state would be invalid, or the device enters an error state,
 * an error is returned.
 */
static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t mask,
                                    uint32_t value)
{
    VFIOMigration *migration = vbasedev->migration;
    VFIORegion *region = &migration->region;
    off_t dev_state_off = region->fd_offset +
                          VFIO_MIG_STRUCT_OFFSET(device_state);
    uint32_t device_state;
    int ret;

    ret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state),
                        dev_state_off);
    if (ret < 0) {
        return ret;
    }

    device_state = (device_state & mask) | value;

    if (!VFIO_DEVICE_STATE_VALID(device_state)) {
        return -EINVAL;
    }

    ret = vfio_mig_write(vbasedev, &device_state, sizeof(device_state),
                         dev_state_off);
    if (ret < 0) {
        int rret;

        rret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state),
                             dev_state_off);
        if ((rret < 0) || (VFIO_DEVICE_STATE_IS_ERROR(device_state))) {
            hw_error("%s: Device in error state 0x%x", vbasedev->name,
                     device_state);
            return rret ? rret : -EIO;
        }
        return ret;
    }

    migration->device_state = device_state;
    trace_vfio_migration_set_state(vbasedev->name, device_state);
    return 0;
}
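
/*
 * Worked example (added for clarity): at stop-and-copy,
 * vfio_save_complete_precopy() below calls
 *
 *     vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_RUNNING,
 *                              VFIO_DEVICE_STATE_SAVING);
 *
 * so a device in _SAVING | _RUNNING moves to ((_SAVING | _RUNNING) &
 * ~_RUNNING) | _SAVING == _SAVING: the running bit is cleared while the
 * saving bit stays set.
 */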
static void *get_data_section_size(VFIORegion *region, uint64_t data_offset,
                                   uint64_t data_size, uint64_t *size)
{
    void *ptr = NULL;
    uint64_t limit = 0;
    int i;

    if (!region->mmaps) {
        if (size) {
            *size = MIN(data_size, region->size - data_offset);
        }
        return ptr;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        VFIOMmap *map = region->mmaps + i;

        if ((data_offset >= map->offset) &&
            (data_offset < map->offset + map->size)) {
            /* check if data_offset is within sparse mmap areas */
            ptr = map->mmap + data_offset - map->offset;
            if (size) {
                *size = MIN(data_size, map->offset + map->size - data_offset);
            }
            break;
        } else if ((data_offset < map->offset) &&
                   (!limit || limit > map->offset)) {
            /*
             * data_offset is not within the sparse mmap areas; find the size
             * of the non-mapped area. Check the whole list, since
             * region->mmaps is not sorted.
             */
            limit = map->offset;
        }
    }

    if (!ptr && size) {
        *size = limit ? MIN(data_size, limit - data_offset) : data_size;
    }
    return ptr;
}
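
/*
 * Illustration (hypothetical values): with sparse mmap areas at
 * [0x1000, 0x2000) and [0x4000, 0x5000), a data_offset of 0x2800 matches no
 * area, so NULL is returned, limit ends up as 0x4000, and *size is capped to
 * 0x4000 - 0x2800; the caller then uses the slow read/write path only up to
 * the start of the next mapped area.
 */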
static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev, uint64_t *size)
{
    VFIOMigration *migration = vbasedev->migration;
    VFIORegion *region = &migration->region;
    uint64_t data_offset = 0, data_size = 0, sz;
    int ret;

    ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset),
                        region->fd_offset +
                        VFIO_MIG_STRUCT_OFFSET(data_offset));
    if (ret < 0) {
        return ret;
    }

    ret = vfio_mig_read(vbasedev, &data_size, sizeof(data_size),
                        region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size));
    if (ret < 0) {
        return ret;
    }

    trace_vfio_save_buffer(vbasedev->name, data_offset, data_size,
                           migration->pending_bytes);

    qemu_put_be64(f, data_size);
    sz = data_size;

    while (sz) {
        void *buf;
        uint64_t sec_size;
        bool buf_allocated = false;

        buf = get_data_section_size(region, data_offset, sz, &sec_size);

        if (!buf) {
            buf = g_try_malloc(sec_size);
            if (!buf) {
                error_report("%s: Error allocating buffer", __func__);
                return -ENOMEM;
            }
            buf_allocated = true;

            ret = vfio_mig_read(vbasedev, buf, sec_size,
                                region->fd_offset + data_offset);
            if (ret < 0) {
                g_free(buf);
                return ret;
            }
        }

        qemu_put_buffer(f, buf, sec_size);

        if (buf_allocated) {
            g_free(buf);
        }
        sz -= sec_size;
        data_offset += sec_size;
    }

    ret = qemu_file_get_error(f);
    if (!ret && size) {
        *size = data_size;
    }

    bytes_transferred += data_size;
    return ret;
}
static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
                            uint64_t data_size)
{
    VFIORegion *region = &vbasedev->migration->region;
    uint64_t data_offset = 0, size, report_size;
    int ret;

    do {
        ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset),
                            region->fd_offset +
                            VFIO_MIG_STRUCT_OFFSET(data_offset));
        if (ret < 0) {
            return ret;
        }

        if (data_offset + data_size > region->size) {
            /*
             * If data_size is greater than the data section of the migration
             * region, iterate the write buffer operation. This case can occur
             * if the migration region at the destination is smaller than the
             * migration region at the source.
             */
            report_size = size = region->size - data_offset;
            data_size -= size;
        } else {
            report_size = size = data_size;
            data_size = 0;
        }

        trace_vfio_load_state_device_data(vbasedev->name, data_offset, size);
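
        /*
         * Illustration (hypothetical sizes): if the source sent an 8MB chunk
         * but only 4MB of the destination's data section remains past
         * data_offset, this pass writes and reports 4MB, and the outer
         * do/while loop runs again for the remaining 4MB.
         */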
        while (size) {
            void *buf;
            uint64_t sec_size;
            bool buf_alloc = false;

            buf = get_data_section_size(region, data_offset, size, &sec_size);

            if (!buf) {
                buf = g_try_malloc(sec_size);
                if (!buf) {
                    error_report("%s: Error allocating buffer", __func__);
                    return -ENOMEM;
                }
                buf_alloc = true;
            }

            qemu_get_buffer(f, buf, sec_size);

            if (buf_alloc) {
                ret = vfio_mig_write(vbasedev, buf, sec_size,
                                     region->fd_offset + data_offset);
                g_free(buf);

                if (ret < 0) {
                    return ret;
                }
            }
            size -= sec_size;
            data_offset += sec_size;
        }

        ret = vfio_mig_write(vbasedev, &report_size, sizeof(report_size),
                             region->fd_offset +
                             VFIO_MIG_STRUCT_OFFSET(data_size));
        if (ret < 0) {
            return ret;
        }
    } while (data_size);

    return 0;
}
static int vfio_update_pending(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;
    VFIORegion *region = &migration->region;
    uint64_t pending_bytes = 0;
    int ret;

    ret = vfio_mig_read(vbasedev, &pending_bytes, sizeof(pending_bytes),
                        region->fd_offset +
                        VFIO_MIG_STRUCT_OFFSET(pending_bytes));
    if (ret < 0) {
        migration->pending_bytes = 0;
        return ret;
    }

    migration->pending_bytes = pending_bytes;
    trace_vfio_update_pending(vbasedev->name, pending_bytes);
    return 0;
}
static int vfio_save_device_config_state(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE);

    if (vbasedev->ops && vbasedev->ops->vfio_save_config) {
        vbasedev->ops->vfio_save_config(vbasedev, f);
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    trace_vfio_save_device_config_state(vbasedev->name);

    return qemu_file_get_error(f);
}
static int vfio_load_device_config_state(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    uint64_t data;

    if (vbasedev->ops && vbasedev->ops->vfio_load_config) {
        int ret;

        ret = vbasedev->ops->vfio_load_config(vbasedev, f);
        if (ret) {
            error_report("%s: Failed to load device config space",
                         vbasedev->name);
            return ret;
        }
    }

    data = qemu_get_be64(f);
    if (data != VFIO_MIG_FLAG_END_OF_STATE) {
        error_report("%s: Failed loading device config space, "
                     "end flag incorrect 0x%"PRIx64, vbasedev->name, data);
        return -EINVAL;
    }

    trace_vfio_load_device_config_state(vbasedev->name);
    return qemu_file_get_error(f);
}
static int vfio_set_dirty_page_tracking(VFIODevice *vbasedev, bool start)
{
    int ret;
    VFIOMigration *migration = vbasedev->migration;
    VFIOContainer *container = vbasedev->group->container;
    struct vfio_iommu_type1_dirty_bitmap dirty = {
        .argsz = sizeof(dirty),
    };

    if (start) {
        if (migration->device_state & VFIO_DEVICE_STATE_SAVING) {
            dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
        } else {
            return -EINVAL;
        }
    } else {
        dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
    }

    ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
    if (ret) {
        error_report("Failed to set dirty tracking flag 0x%x errno: %d",
                     dirty.flags, errno);
        return -errno;
    }
    return ret;
}
static void vfio_migration_cleanup(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    vfio_set_dirty_page_tracking(vbasedev, false);

    if (migration->region.mmaps) {
        vfio_region_unmap(&migration->region);
    }
}
/* ---------------------------------------------------------------------- */
static int vfio_save_setup(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    int ret;

    trace_vfio_save_setup(vbasedev->name);

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_SETUP_STATE);

    if (migration->region.mmaps) {
        /*
         * Calling vfio_region_mmap() from the migration thread. Memory API
         * calls made from this function require locking the iothread when
         * called from outside the main loop thread.
         */
        qemu_mutex_lock_iothread();
        ret = vfio_region_mmap(&migration->region);
        qemu_mutex_unlock_iothread();
        if (ret) {
            error_report("%s: Failed to mmap VFIO migration region: %s",
                         vbasedev->name, strerror(-ret));
            error_report("%s: Falling back to slow path", vbasedev->name);
        }
    }

    ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_MASK,
                                   VFIO_DEVICE_STATE_SAVING);
    if (ret) {
        error_report("%s: Failed to set state SAVING", vbasedev->name);
        return ret;
    }

    ret = vfio_set_dirty_page_tracking(vbasedev, true);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    ret = qemu_file_get_error(f);
    if (ret) {
        return ret;
    }

    return 0;
}
static void vfio_save_cleanup(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    vfio_migration_cleanup(vbasedev);
    trace_vfio_save_cleanup(vbasedev->name);
}
static void vfio_save_pending(QEMUFile *f, void *opaque,
                              uint64_t threshold_size,
                              uint64_t *res_precopy_only,
                              uint64_t *res_compatible,
                              uint64_t *res_postcopy_only)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    int ret;

    ret = vfio_update_pending(vbasedev);
    if (ret) {
        return;
    }

    *res_precopy_only += migration->pending_bytes;

    trace_vfio_save_pending(vbasedev->name, *res_precopy_only,
                            *res_postcopy_only, *res_compatible);
}
static int vfio_save_iterate(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    uint64_t data_size;
    int ret;

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);

    if (migration->pending_bytes == 0) {
        ret = vfio_update_pending(vbasedev);
        if (ret) {
            return ret;
        }

        if (migration->pending_bytes == 0) {
            qemu_put_be64(f, 0);
            qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
            /* indicates data finished, go to the completion phase */
            return 1;
        }
    }

    ret = vfio_save_buffer(f, vbasedev, &data_size);
    if (ret) {
        error_report("%s: vfio_save_buffer failed %s", vbasedev->name,
                     strerror(errno));
        return ret;
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    ret = qemu_file_get_error(f);
    if (ret) {
        return ret;
    }

    /*
     * Reset pending_bytes, as .save_live_pending is not called during the
     * savevm or snapshot case; there, vfio_update_pending() at the start of
     * this function updates pending_bytes.
     */
    migration->pending_bytes = 0;
    trace_vfio_save_iterate(vbasedev->name, data_size);
    return 0;
}
static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    uint64_t data_size;
    int ret;

    ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_RUNNING,
                                   VFIO_DEVICE_STATE_SAVING);
    if (ret) {
        error_report("%s: Failed to set state STOP and SAVING",
                     vbasedev->name);
        return ret;
    }

    ret = vfio_save_device_config_state(f, opaque);
    if (ret) {
        return ret;
    }

    ret = vfio_update_pending(vbasedev);
    if (ret) {
        return ret;
    }

    while (migration->pending_bytes > 0) {
        qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
        ret = vfio_save_buffer(f, vbasedev, &data_size);
        if (ret < 0) {
            error_report("%s: Failed to save buffer", vbasedev->name);
            return ret;
        }

        if (data_size == 0) {
            break;
        }

        ret = vfio_update_pending(vbasedev);
        if (ret) {
            return ret;
        }
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    ret = qemu_file_get_error(f);
    if (ret) {
        return ret;
    }

    ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_SAVING, 0);
    if (ret) {
        error_report("%s: Failed to set state STOPPED", vbasedev->name);
        return ret;
    }

    trace_vfio_save_complete_precopy(vbasedev->name);
    return ret;
}
static int vfio_load_setup(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    int ret = 0;

    if (migration->region.mmaps) {
        ret = vfio_region_mmap(&migration->region);
        if (ret) {
            error_report("%s: Failed to mmap VFIO migration region %d: %s",
                         vbasedev->name, migration->region.nr,
                         strerror(-ret));
            error_report("%s: Falling back to slow path", vbasedev->name);
        }
    }

    ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_MASK,
                                   VFIO_DEVICE_STATE_RESUMING);
    if (ret) {
        error_report("%s: Failed to set state RESUMING", vbasedev->name);
        if (migration->region.mmaps) {
            vfio_region_unmap(&migration->region);
        }
    }
    return ret;
}
static int vfio_load_cleanup(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    vfio_migration_cleanup(vbasedev);
    trace_vfio_load_cleanup(vbasedev->name);
    return 0;
}
static int vfio_load_state(QEMUFile *f, void *opaque, int version_id)
{
    VFIODevice *vbasedev = opaque;
    int ret = 0;
    uint64_t data;

    data = qemu_get_be64(f);
    while (data != VFIO_MIG_FLAG_END_OF_STATE) {

        trace_vfio_load_state(vbasedev->name, data);

        switch (data) {
        case VFIO_MIG_FLAG_DEV_CONFIG_STATE:
        {
            ret = vfio_load_device_config_state(f, opaque);
            if (ret) {
                return ret;
            }
            break;
        }
        case VFIO_MIG_FLAG_DEV_SETUP_STATE:
        {
            data = qemu_get_be64(f);
            if (data == VFIO_MIG_FLAG_END_OF_STATE) {
                return ret;
            } else {
                error_report("%s: SETUP STATE: EOS not found 0x%"PRIx64,
                             vbasedev->name, data);
                return -EINVAL;
            }
        }
        case VFIO_MIG_FLAG_DEV_DATA_STATE:
        {
            uint64_t data_size = qemu_get_be64(f);

            if (data_size) {
                ret = vfio_load_buffer(f, vbasedev, data_size);
                if (ret < 0) {
                    return ret;
                }
            }
            break;
        }
        default:
            error_report("%s: Unknown tag 0x%"PRIx64, vbasedev->name, data);
            return -EINVAL;
        }

        data = qemu_get_be64(f);
        ret = qemu_file_get_error(f);
        if (ret) {
            return ret;
        }
    }
    return ret;
}
static SaveVMHandlers savevm_vfio_handlers = {
    .save_setup = vfio_save_setup,
    .save_cleanup = vfio_save_cleanup,
    .save_live_pending = vfio_save_pending,
    .save_live_iterate = vfio_save_iterate,
    .save_live_complete_precopy = vfio_save_complete_precopy,
    .load_setup = vfio_load_setup,
    .load_cleanup = vfio_load_cleanup,
    .load_state = vfio_load_state,
};
/* ---------------------------------------------------------------------- */
static void vfio_vmstate_change(void *opaque, int running, RunState state)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    uint32_t value, mask;
    int ret;

    if (vbasedev->migration->vm_running == running) {
        return;
    }

    if (running) {
        /*
         * Here the device state can have one of the _SAVING, _RESUMING or
         * _STOP bits set. A transition from _SAVING to _RUNNING can happen on
         * migration failure; in that case clear the _SAVING bit. A transition
         * from _RESUMING to _RUNNING occurs during the resume phase; in that
         * case clear the _RESUMING bit. In both cases, set the _RUNNING bit.
         */
        mask = ~VFIO_DEVICE_STATE_MASK;
        value = VFIO_DEVICE_STATE_RUNNING;
    } else {
        /*
         * Here the device state could be either _RUNNING or
         * _SAVING | _RUNNING. Reset the _RUNNING bit.
         */
        mask = ~VFIO_DEVICE_STATE_RUNNING;
        value = 0;
    }

    ret = vfio_migration_set_state(vbasedev, mask, value);
    if (ret) {
        /*
         * Migration should be aborted in this case, but vm_state_notify()
         * currently does not support reporting failures.
         */
        error_report("%s: Failed to set device state 0x%x", vbasedev->name,
                     (migration->device_state & mask) | value);
        qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
    }
    vbasedev->migration->vm_running = running;
    trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state),
                              (migration->device_state & mask) | value);
}
static void vfio_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *s = data;
    VFIOMigration *migration = container_of(notifier, VFIOMigration,
                                            migration_state);
    VFIODevice *vbasedev = migration->vbasedev;
    int ret;

    trace_vfio_migration_state_notifier(vbasedev->name,
                                        MigrationStatus_str(s->state));

    switch (s->state) {
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_FAILED:
        bytes_transferred = 0;
        ret = vfio_migration_set_state(vbasedev,
                                       ~(VFIO_DEVICE_STATE_SAVING |
                                         VFIO_DEVICE_STATE_RESUMING),
                                       VFIO_DEVICE_STATE_RUNNING);
        if (ret) {
            error_report("%s: Failed to set state RUNNING", vbasedev->name);
        }
    }
}
static void vfio_migration_exit(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    vfio_region_exit(&migration->region);
    vfio_region_finalize(&migration->region);
    g_free(vbasedev->migration);
    vbasedev->migration = NULL;
}
static int vfio_migration_init(VFIODevice *vbasedev,
                               struct vfio_region_info *info)
{
    int ret;
    Object *obj;
    VFIOMigration *migration;
    char id[256] = "";
    g_autofree char *path = NULL, *oid = NULL;

    if (!vbasedev->ops->vfio_get_object) {
        return -EINVAL;
    }

    obj = vbasedev->ops->vfio_get_object(vbasedev);
    if (!obj) {
        return -EINVAL;
    }

    vbasedev->migration = g_new0(VFIOMigration, 1);

    ret = vfio_region_setup(obj, vbasedev, &vbasedev->migration->region,
                            info->index, "migration");
    if (ret) {
        error_report("%s: Failed to setup VFIO migration region %d: %s",
                     vbasedev->name, info->index, strerror(-ret));
        goto err;
    }

    if (!vbasedev->migration->region.size) {
        error_report("%s: Invalid zero-sized VFIO migration region %d",
                     vbasedev->name, info->index);
        ret = -EINVAL;
        goto err;
    }

    migration = vbasedev->migration;
    migration->vbasedev = vbasedev;

    oid = vmstate_if_get_id(VMSTATE_IF(DEVICE(obj)));
    if (oid) {
        path = g_strdup_printf("%s/vfio", oid);
    } else {
        path = g_strdup("vfio");
    }
    strpadcpy(id, sizeof(id), path, '\0');
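
    /*
     * Hypothetical example: for a device whose vmstate id is "0000:01:00.0",
     * the live section registered below is named "0000:01:00.0/vfio".
     */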
    register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1, &savevm_vfio_handlers,
                         vbasedev);

    migration->vm_state = qemu_add_vm_change_state_handler(vfio_vmstate_change,
                                                           vbasedev);
    migration->migration_state.notify = vfio_migration_state_notifier;
    add_migration_state_change_notifier(&migration->migration_state);
    return 0;

err:
    vfio_migration_exit(vbasedev);
    return ret;
}
/* ---------------------------------------------------------------------- */
int64_t vfio_mig_bytes_transferred(void)
{
    return bytes_transferred;
}
int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
{
    VFIOContainer *container = vbasedev->group->container;
    struct vfio_region_info *info = NULL;
    Error *local_err = NULL;
    int ret = -ENOTSUP;

    if (!vbasedev->enable_migration || !container->dirty_pages_supported) {
        goto add_blocker;
    }

    ret = vfio_get_dev_region_info(vbasedev, VFIO_REGION_TYPE_MIGRATION,
                                   VFIO_REGION_SUBTYPE_MIGRATION, &info);
    if (ret) {
        goto add_blocker;
    }

    ret = vfio_migration_init(vbasedev, info);
    if (ret) {
        goto add_blocker;
    }

    trace_vfio_migration_probe(vbasedev->name, info->index);
    g_free(info);
    return 0;

add_blocker:
    error_setg(&vbasedev->migration_blocker,
               "VFIO device doesn't support migration");
    g_free(info);

    ret = migrate_add_blocker(vbasedev->migration_blocker, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_free(vbasedev->migration_blocker);
        vbasedev->migration_blocker = NULL;
    }
    return ret;
}
void vfio_migration_finalize(VFIODevice *vbasedev)
{
    if (vbasedev->migration) {
        VFIOMigration *migration = vbasedev->migration;

        remove_migration_state_change_notifier(&migration->migration_state);
        qemu_del_vm_change_state_handler(migration->vm_state);
        vfio_migration_exit(vbasedev);
    }

    if (vbasedev->migration_blocker) {
        migrate_del_blocker(vbasedev->migration_blocker);
        error_free(vbasedev->migration_blocker);
        vbasedev->migration_blocker = NULL;
    }
}