/*
 * Migration support for VFIO devices
 *
 * Copyright NVIDIA, Inc. 2020
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include <linux/vfio.h>
#include <sys/ioctl.h>

#include "sysemu/runstate.h"
#include "hw/vfio/vfio-common.h"
#include "migration/migration.h"
#include "migration/vmstate.h"
#include "migration/qemu-file.h"
#include "migration/register.h"
#include "migration/blocker.h"
#include "migration/misc.h"
#include "qapi/error.h"
#include "exec/ramlist.h"
#include "exec/ram_addr.h"
#include "pci.h"
#include "trace.h"
#include "hw/hw.h"
/*
 * Flags to be used as unique delimiters for VFIO devices in the migration
 * stream. These flags are composed as:
 * 0xffffffff => MSB 32-bit all 1s
 * 0xef10     => Magic ID, represents emulated (virtual) function IO
 * 0x0000     => 16-bits reserved for flags
 *
 * The beginning of state information is marked by _DEV_CONFIG_STATE,
 * _DEV_SETUP_STATE, or _DEV_DATA_STATE, respectively. The end of each state
 * section is marked by _END_OF_STATE.
 */
#define VFIO_MIG_FLAG_END_OF_STATE      (0xffffffffef100001ULL)
#define VFIO_MIG_FLAG_DEV_CONFIG_STATE  (0xffffffffef100002ULL)
#define VFIO_MIG_FLAG_DEV_SETUP_STATE   (0xffffffffef100003ULL)
#define VFIO_MIG_FLAG_DEV_DATA_STATE    (0xffffffffef100004ULL)
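
/*
 * For orientation, the stream emitted for one device by the save handlers
 * below is framed roughly as follows (an informal sketch derived from
 * vfio_save_setup(), vfio_save_iterate() and vfio_save_state(), not a
 * normative format description):
 *
 *   VFIO_MIG_FLAG_DEV_SETUP_STATE       from vfio_save_setup()
 *   VFIO_MIG_FLAG_END_OF_STATE
 *   VFIO_MIG_FLAG_DEV_DATA_STATE        repeated by vfio_save_iterate()
 *   <data_size><device data...>         and vfio_save_complete_precopy()
 *   VFIO_MIG_FLAG_END_OF_STATE
 *   VFIO_MIG_FLAG_DEV_CONFIG_STATE      from vfio_save_state()
 *   <device config data>
 *   VFIO_MIG_FLAG_END_OF_STATE
 */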
static int64_t bytes_transferred;
static inline int vfio_mig_access(VFIODevice *vbasedev, void *val, int count,
                                  off_t off, bool iswrite)
{
    int ret;

    ret = iswrite ? pwrite(vbasedev->fd, val, count, off) :
                    pread(vbasedev->fd, val, count, off);
    if (ret < count) {
        error_report("vfio_mig_%s %d byte %s: failed at offset 0x%"
                     HWADDR_PRIx", err: %s", iswrite ? "write" : "read", count,
                     vbasedev->name, off, strerror(errno));
        return (ret < 0) ? ret : -EINVAL;
    }
    return 0;
}
static int vfio_mig_rw(VFIODevice *vbasedev, __u8 *buf, size_t count,
                       off_t off, bool iswrite)
{
    int ret, done = 0;
    __u8 *tbuf = buf;

    while (count) {
        int bytes = 0;

        /* Use the widest naturally aligned access possible */
        if (count >= 8 && !(off % 8)) {
            bytes = 8;
        } else if (count >= 4 && !(off % 4)) {
            bytes = 4;
        } else if (count >= 2 && !(off % 2)) {
            bytes = 2;
        } else {
            bytes = 1;
        }

        ret = vfio_mig_access(vbasedev, tbuf, bytes, off, iswrite);
        if (ret) {
            return ret;
        }

        count -= bytes;
        done += bytes;
        off += bytes;
        tbuf += bytes;
    }
    return done;
}
#define vfio_mig_read(f, v, c, o)   vfio_mig_rw(f, (__u8 *)v, c, o, false)
#define vfio_mig_write(f, v, c, o)  vfio_mig_rw(f, (__u8 *)v, c, o, true)

#define VFIO_MIG_STRUCT_OFFSET(f)       \
                                 offsetof(struct vfio_device_migration_info, f)
/*
 * Change the device_state register for device @vbasedev. Bits set in @mask
 * are preserved, bits set in @value are set, and bits not set in either @mask
 * or @value are cleared in device_state. If the register cannot be accessed,
 * the resulting state would be invalid, or the device enters an error state,
 * an error is returned.
 */
static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t mask,
                                    uint32_t value)
{
    VFIOMigration *migration = vbasedev->migration;
    VFIORegion *region = &migration->region;
    off_t dev_state_off = region->fd_offset +
                          VFIO_MIG_STRUCT_OFFSET(device_state);
    uint32_t device_state;
    int ret;

    ret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state),
                        dev_state_off);
    if (ret < 0) {
        return ret;
    }

    device_state = (device_state & mask) | value;

    if (!VFIO_DEVICE_STATE_VALID(device_state)) {
        return -EINVAL;
    }

    ret = vfio_mig_write(vbasedev, &device_state, sizeof(device_state),
                         dev_state_off);
    if (ret < 0) {
        int rret;

        rret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state),
                             dev_state_off);

        if ((rret < 0) || (VFIO_DEVICE_STATE_IS_ERROR(device_state))) {
            hw_error("%s: Device in error state 0x%x", vbasedev->name,
                     device_state);
            return rret ? rret : -EIO;
        }
        return ret;
    }

    migration->device_state = device_state;
    trace_vfio_migration_set_state(vbasedev->name, device_state);
    return 0;
}
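
/*
 * Example of the mask/value semantics: vfio_save_complete_precopy() below
 * calls vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_V1_RUNNING,
 * VFIO_DEVICE_STATE_V1_SAVING), which preserves everything except the
 * _RUNNING bit and sets _SAVING, i.e. it moves the device into
 * stop-and-copy (!_RUNNING | _SAVING).
 */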
static void *get_data_section_size(VFIORegion *region, uint64_t data_offset,
                                   uint64_t data_size, uint64_t *size)
{
    void *ptr = NULL;
    uint64_t limit = 0;
    int i;

    if (!region->mmaps) {
        if (size) {
            *size = MIN(data_size, region->size - data_offset);
        }
        return ptr;
    }

    for (i = 0; i < region->nr_mmaps; i++) {
        VFIOMmap *map = region->mmaps + i;

        if ((data_offset >= map->offset) &&
            (data_offset < map->offset + map->size)) {

            /* check if data_offset is within sparse mmap areas */
            ptr = map->mmap + data_offset - map->offset;
            if (size) {
                *size = MIN(data_size, map->offset + map->size - data_offset);
            }
            break;
        } else if ((data_offset < map->offset) &&
                   (!limit || limit > map->offset)) {
            /*
             * data_offset is not within the sparse mmap areas; find the size
             * of the non-mapped area. Check the whole list, since the
             * region->mmaps list is not sorted.
             */
            limit = map->offset;
        }
    }

    if (!ptr && size) {
        *size = limit ? MIN(data_size, limit - data_offset) : data_size;
    }
    return ptr;
}
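
/*
 * Illustration (hypothetical numbers): with a single sparse mmap at offset
 * 0x1000 of size 0x1000, a lookup at data_offset 0x1800 for data_size
 * 0x1000 returns a direct pointer with *size clamped to 0x800 (the rest of
 * the mapped window), while a lookup at data_offset 0x800 returns NULL with
 * *size = 0x800, the extent of the non-mapped gap before the mmap'd area.
 */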
static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev, uint64_t *size)
{
    VFIOMigration *migration = vbasedev->migration;
    VFIORegion *region = &migration->region;
    uint64_t data_offset = 0, data_size = 0, sz;
    int ret;

    ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset),
                      region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_offset));
    if (ret < 0) {
        return ret;
    }

    ret = vfio_mig_read(vbasedev, &data_size, sizeof(data_size),
                        region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size));
    if (ret < 0) {
        return ret;
    }

    trace_vfio_save_buffer(vbasedev->name, data_offset, data_size,
                           migration->pending_bytes);

    qemu_put_be64(f, data_size);
    sz = data_size;

    while (sz) {
        void *buf;
        uint64_t sec_size;
        bool buf_allocated = false;

        buf = get_data_section_size(region, data_offset, sz, &sec_size);

        if (!buf) {
            buf = g_try_malloc(sec_size);
            if (!buf) {
                error_report("%s: Error allocating buffer ", __func__);
                return -ENOMEM;
            }
            buf_allocated = true;

            ret = vfio_mig_read(vbasedev, buf, sec_size,
                                region->fd_offset + data_offset);
            if (ret < 0) {
                g_free(buf);
                return ret;
            }
        }

        qemu_put_buffer(f, buf, sec_size);

        if (buf_allocated) {
            g_free(buf);
        }
        sz -= sec_size;
        data_offset += sec_size;
    }

    ret = qemu_file_get_error(f);

    if (!ret && size) {
        *size = data_size;
    }

    bytes_transferred += data_size;
    return ret;
}
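
/*
 * Each vfio_save_buffer() cycle thus follows the v1 region protocol: read
 * the data_offset and data_size registers from the migration region header,
 * then transfer data_size bytes starting at data_offset, either directly
 * from the mmap'd window or via pread() for non-mapped sections.
 */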
static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
                            uint64_t data_size)
{
    VFIORegion *region = &vbasedev->migration->region;
    uint64_t data_offset = 0, size, report_size;
    int ret;

    do {
        ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset),
                      region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_offset));
        if (ret < 0) {
            return ret;
        }

        if (data_offset + data_size > region->size) {
            /*
             * If data_size is greater than the data section of the migration
             * region, iterate the write buffer operation. This case can occur
             * if the migration region at the destination is smaller than at
             * the source.
             */
            report_size = size = region->size - data_offset;
            data_size -= size;
        } else {
            report_size = size = data_size;
            data_size = 0;
        }

        trace_vfio_load_state_device_data(vbasedev->name, data_offset, size);

        while (size) {
            void *buf;
            uint64_t sec_size;
            bool buf_alloc = false;

            buf = get_data_section_size(region, data_offset, size, &sec_size);

            if (!buf) {
                buf = g_try_malloc(sec_size);
                if (!buf) {
                    error_report("%s: Error allocating buffer ", __func__);
                    return -ENOMEM;
                }
                buf_alloc = true;
            }

            qemu_get_buffer(f, buf, sec_size);

            if (buf_alloc) {
                ret = vfio_mig_write(vbasedev, buf, sec_size,
                                     region->fd_offset + data_offset);
                g_free(buf);

                if (ret < 0) {
                    return ret;
                }
            }
            size -= sec_size;
            data_offset += sec_size;
        }

        ret = vfio_mig_write(vbasedev, &report_size, sizeof(report_size),
                        region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size));
        if (ret < 0) {
            return ret;
        }
    } while (data_size);

    return 0;
}
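
/*
 * Worked example (hypothetical sizes): if the source sends a data_size of
 * 12MB but the destination's data section only spans 8MB past data_offset,
 * the outer loop first writes 8MB and reports 8MB via the data_size
 * register, then re-reads data_offset and writes the remaining 4MB in a
 * second pass.
 */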
static int vfio_update_pending(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;
    VFIORegion *region = &migration->region;
    uint64_t pending_bytes = 0;
    int ret;

    ret = vfio_mig_read(vbasedev, &pending_bytes, sizeof(pending_bytes),
                    region->fd_offset + VFIO_MIG_STRUCT_OFFSET(pending_bytes));
    if (ret < 0) {
        migration->pending_bytes = 0;
        return ret;
    }

    migration->pending_bytes = pending_bytes;
    trace_vfio_update_pending(vbasedev->name, pending_bytes);
    return 0;
}
static int vfio_save_device_config_state(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE);

    if (vbasedev->ops && vbasedev->ops->vfio_save_config) {
        vbasedev->ops->vfio_save_config(vbasedev, f);
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    trace_vfio_save_device_config_state(vbasedev->name);

    return qemu_file_get_error(f);
}
static int vfio_load_device_config_state(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    uint64_t data;

    if (vbasedev->ops && vbasedev->ops->vfio_load_config) {
        int ret;

        ret = vbasedev->ops->vfio_load_config(vbasedev, f);
        if (ret) {
            error_report("%s: Failed to load device config space",
                         vbasedev->name);
            return ret;
        }
    }

    data = qemu_get_be64(f);
    if (data != VFIO_MIG_FLAG_END_OF_STATE) {
        error_report("%s: Failed loading device config space, "
                     "end flag incorrect 0x%"PRIx64, vbasedev->name, data);
        return -EINVAL;
    }

    trace_vfio_load_device_config_state(vbasedev->name);
    return qemu_file_get_error(f);
}
static void vfio_migration_cleanup(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    if (migration->region.mmaps) {
        vfio_region_unmap(&migration->region);
    }
}
/* ---------------------------------------------------------------------- */
static int vfio_save_setup(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    int ret;

    trace_vfio_save_setup(vbasedev->name);

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_SETUP_STATE);

    if (migration->region.mmaps) {
        /*
         * Calling vfio_region_mmap() from the migration thread. Memory API
         * calls made from this function require the iothread lock when
         * invoked outside the main loop thread.
         */
        qemu_mutex_lock_iothread();
        ret = vfio_region_mmap(&migration->region);
        qemu_mutex_unlock_iothread();
        if (ret) {
            error_report("%s: Failed to mmap VFIO migration region: %s",
                         vbasedev->name, strerror(-ret));
            error_report("%s: Falling back to slow path", vbasedev->name);
        }
    }

    ret = vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_MASK,
                                   VFIO_DEVICE_STATE_V1_SAVING);
    if (ret) {
        error_report("%s: Failed to set state SAVING", vbasedev->name);
        return ret;
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    ret = qemu_file_get_error(f);
    if (ret) {
        return ret;
    }

    return 0;
}
static void vfio_save_cleanup(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    vfio_migration_cleanup(vbasedev);
    trace_vfio_save_cleanup(vbasedev->name);
}
static void vfio_save_pending(QEMUFile *f, void *opaque,
                              uint64_t threshold_size,
                              uint64_t *res_precopy_only,
                              uint64_t *res_compatible,
                              uint64_t *res_postcopy_only)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    int ret;

    ret = vfio_update_pending(vbasedev);
    if (ret) {
        return;
    }

    *res_precopy_only += migration->pending_bytes;

    trace_vfio_save_pending(vbasedev->name, *res_precopy_only,
                            *res_postcopy_only, *res_compatible);
}
static int vfio_save_iterate(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    uint64_t data_size;
    int ret;

    qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);

    if (migration->pending_bytes == 0) {
        ret = vfio_update_pending(vbasedev);
        if (ret) {
            return ret;
        }

        if (migration->pending_bytes == 0) {
            qemu_put_be64(f, 0);
            qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
            /* indicates data finished, goto complete phase */
            return 1;
        }
    }

    ret = vfio_save_buffer(f, vbasedev, &data_size);
    if (ret) {
        error_report("%s: vfio_save_buffer failed %s", vbasedev->name,
                     strerror(errno));
        return ret;
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    ret = qemu_file_get_error(f);
    if (ret) {
        return ret;
    }

    /*
     * Reset pending_bytes, as .save_live_pending is not called during savevm
     * or snapshot; in that case, vfio_update_pending() at the start of this
     * function updates pending_bytes.
     */
    migration->pending_bytes = 0;
    trace_vfio_save_iterate(vbasedev->name, data_size);
    return 0;
}
static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    uint64_t data_size;
    int ret;

    ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_V1_RUNNING,
                                   VFIO_DEVICE_STATE_V1_SAVING);
    if (ret) {
        error_report("%s: Failed to set state STOP and SAVING",
                     vbasedev->name);
        return ret;
    }

    ret = vfio_update_pending(vbasedev);
    if (ret) {
        return ret;
    }

    while (migration->pending_bytes > 0) {
        qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
        ret = vfio_save_buffer(f, vbasedev, &data_size);
        if (ret < 0) {
            error_report("%s: Failed to save buffer", vbasedev->name);
            return ret;
        }

        if (data_size == 0) {
            break;
        }

        ret = vfio_update_pending(vbasedev);
        if (ret) {
            return ret;
        }
    }

    qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);

    ret = qemu_file_get_error(f);
    if (ret) {
        return ret;
    }

    ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_V1_SAVING, 0);
    if (ret) {
        error_report("%s: Failed to set state STOPPED", vbasedev->name);
        return ret;
    }

    trace_vfio_save_complete_precopy(vbasedev->name);
    return ret;
}
static void vfio_save_state(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    int ret;

    ret = vfio_save_device_config_state(f, opaque);
    if (ret) {
        error_report("%s: Failed to save device config space",
                     vbasedev->name);
        qemu_file_set_error(f, ret);
    }
}
static int vfio_load_setup(QEMUFile *f, void *opaque)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    int ret = 0;

    if (migration->region.mmaps) {
        ret = vfio_region_mmap(&migration->region);
        if (ret) {
            error_report("%s: Failed to mmap VFIO migration region %d: %s",
                         vbasedev->name, migration->region.nr,
                         strerror(-ret));
            error_report("%s: Falling back to slow path", vbasedev->name);
        }
    }

    ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_MASK,
                                   VFIO_DEVICE_STATE_V1_RESUMING);
    if (ret) {
        error_report("%s: Failed to set state RESUMING", vbasedev->name);
        if (migration->region.mmaps) {
            vfio_region_unmap(&migration->region);
        }
    }
    return ret;
}
static int vfio_load_cleanup(void *opaque)
{
    VFIODevice *vbasedev = opaque;

    vfio_migration_cleanup(vbasedev);
    trace_vfio_load_cleanup(vbasedev->name);
    return 0;
}
static int vfio_load_state(QEMUFile *f, void *opaque, int version_id)
{
    VFIODevice *vbasedev = opaque;
    int ret = 0;
    uint64_t data;

    data = qemu_get_be64(f);
    while (data != VFIO_MIG_FLAG_END_OF_STATE) {

        trace_vfio_load_state(vbasedev->name, data);

        switch (data) {
        case VFIO_MIG_FLAG_DEV_CONFIG_STATE:
        {
            return vfio_load_device_config_state(f, opaque);
        }
        case VFIO_MIG_FLAG_DEV_SETUP_STATE:
        {
            data = qemu_get_be64(f);
            if (data == VFIO_MIG_FLAG_END_OF_STATE) {
                return ret;
            } else {
                error_report("%s: SETUP STATE: EOS not found 0x%"PRIx64,
                             vbasedev->name, data);
                return -EINVAL;
            }
            break;
        }
        case VFIO_MIG_FLAG_DEV_DATA_STATE:
        {
            uint64_t data_size = qemu_get_be64(f);

            if (data_size) {
                ret = vfio_load_buffer(f, vbasedev, data_size);
                if (ret < 0) {
                    return ret;
                }
            }
            break;
        }
        default:
            error_report("%s: Unknown tag 0x%"PRIx64, vbasedev->name, data);
            return -EINVAL;
        }

        data = qemu_get_be64(f);
        ret = qemu_file_get_error(f);
        if (ret) {
            return ret;
        }
    }
    return ret;
}
static SaveVMHandlers savevm_vfio_handlers = {
    .save_setup = vfio_save_setup,
    .save_cleanup = vfio_save_cleanup,
    .save_live_pending = vfio_save_pending,
    .save_live_iterate = vfio_save_iterate,
    .save_live_complete_precopy = vfio_save_complete_precopy,
    .save_state = vfio_save_state,
    .load_setup = vfio_load_setup,
    .load_cleanup = vfio_load_cleanup,
    .load_state = vfio_load_state,
};
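
/*
 * The save handlers above run on the source side (setup, iterative copy,
 * stop-and-copy, and the non-iterative device config via .save_state);
 * the load handlers run on the destination and replay the same stream.
 */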
/* ---------------------------------------------------------------------- */
static void vfio_vmstate_change(void *opaque, bool running, RunState state)
{
    VFIODevice *vbasedev = opaque;
    VFIOMigration *migration = vbasedev->migration;
    uint32_t value, mask;
    int ret;

    if (vbasedev->migration->vm_running == running) {
        return;
    }

    if (running) {
        /*
         * Here the device state can have one of the _SAVING, _RESUMING or
         * _STOP bits set. A transition from _SAVING to _RUNNING can happen on
         * migration failure, in which case the _SAVING bit is cleared. A
         * transition from _RESUMING to _RUNNING occurs during the resuming
         * phase, in which case the _RESUMING bit is cleared. In both cases,
         * the _RUNNING bit is set.
         */
        mask = ~VFIO_DEVICE_STATE_MASK;
        value = VFIO_DEVICE_STATE_V1_RUNNING;
    } else {
        /*
         * Here the device state could be either _RUNNING or _SAVING|_RUNNING.
         * Reset the _RUNNING bit.
         */
        mask = ~VFIO_DEVICE_STATE_V1_RUNNING;

        /*
         * When the VM transitions to stop for the savevm command, the device
         * should start saving data.
         */
        if (state == RUN_STATE_SAVE_VM) {
            value = VFIO_DEVICE_STATE_V1_SAVING;
        } else {
            value = 0;
        }
    }

    ret = vfio_migration_set_state(vbasedev, mask, value);
    if (ret) {
        /*
         * Migration should be aborted in this case, but vm_state_notify()
         * currently does not support reporting failures.
         */
        error_report("%s: Failed to set device state 0x%x", vbasedev->name,
                     (migration->device_state & mask) | value);
        qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
    }
    vbasedev->migration->vm_running = running;
    trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state),
                              (migration->device_state & mask) | value);
}
static void vfio_migration_state_notifier(Notifier *notifier, void *data)
{
    MigrationState *s = data;
    VFIOMigration *migration = container_of(notifier, VFIOMigration,
                                            migration_state);
    VFIODevice *vbasedev = migration->vbasedev;
    int ret;

    trace_vfio_migration_state_notifier(vbasedev->name,
                                        MigrationStatus_str(s->state));

    switch (s->state) {
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_FAILED:
        bytes_transferred = 0;
        ret = vfio_migration_set_state(vbasedev,
                                       ~(VFIO_DEVICE_STATE_V1_SAVING |
                                         VFIO_DEVICE_STATE_V1_RESUMING),
                                       VFIO_DEVICE_STATE_V1_RUNNING);
        if (ret) {
            error_report("%s: Failed to set state RUNNING", vbasedev->name);
        }
    }
}
static void vfio_migration_exit(VFIODevice *vbasedev)
{
    VFIOMigration *migration = vbasedev->migration;

    vfio_region_exit(&migration->region);
    vfio_region_finalize(&migration->region);
    g_free(vbasedev->migration);
    vbasedev->migration = NULL;
}
static int vfio_migration_init(VFIODevice *vbasedev,
                               struct vfio_region_info *info)
{
    int ret;
    Object *obj;
    VFIOMigration *migration;
    char id[256] = "";
    g_autofree char *path = NULL, *oid = NULL;

    if (!vbasedev->ops->vfio_get_object) {
        return -EINVAL;
    }

    obj = vbasedev->ops->vfio_get_object(vbasedev);
    if (!obj) {
        return -EINVAL;
    }

    vbasedev->migration = g_new0(VFIOMigration, 1);
    vbasedev->migration->device_state = VFIO_DEVICE_STATE_V1_RUNNING;
    vbasedev->migration->vm_running = runstate_is_running();

    ret = vfio_region_setup(obj, vbasedev, &vbasedev->migration->region,
                            info->index, "migration");
    if (ret) {
        error_report("%s: Failed to setup VFIO migration region %d: %s",
                     vbasedev->name, info->index, strerror(-ret));
        goto err;
    }

    if (!vbasedev->migration->region.size) {
        error_report("%s: Invalid zero-sized VFIO migration region %d",
                     vbasedev->name, info->index);
        ret = -EINVAL;
        goto err;
    }

    migration = vbasedev->migration;
    migration->vbasedev = vbasedev;

    oid = vmstate_if_get_id(VMSTATE_IF(DEVICE(obj)));
    if (oid) {
        path = g_strdup_printf("%s/vfio", oid);
    } else {
        path = g_strdup("vfio");
    }
    strpadcpy(id, sizeof(id), path, '\0');

    register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1, &savevm_vfio_handlers,
                         vbasedev);

    migration->vm_state = qdev_add_vm_change_state_handler(vbasedev->dev,
                                                           vfio_vmstate_change,
                                                           vbasedev);
    migration->migration_state.notify = vfio_migration_state_notifier;
    add_migration_state_change_notifier(&migration->migration_state);
    return 0;

err:
    vfio_migration_exit(vbasedev);
    return ret;
}
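
/*
 * The savevm id registered above is "<vmstate id>/vfio" when the device
 * provides a vmstate id (e.g. a hypothetical "0000:65:00.0/vfio" for a PCI
 * device), or plain "vfio" otherwise.
 */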
/* ---------------------------------------------------------------------- */
int64_t vfio_mig_bytes_transferred(void)
{
    return bytes_transferred;
}
int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
{
    VFIOContainer *container = vbasedev->group->container;
    struct vfio_region_info *info = NULL;
    int ret = -ENOTSUP;

    if (!vbasedev->enable_migration || !container->dirty_pages_supported) {
        goto add_blocker;
    }

    ret = vfio_get_dev_region_info(vbasedev,
                                   VFIO_REGION_TYPE_MIGRATION_DEPRECATED,
                                   VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED,
                                   &info);
    if (ret) {
        goto add_blocker;
    }

    ret = vfio_migration_init(vbasedev, info);
    if (ret) {
        goto add_blocker;
    }

    trace_vfio_migration_probe(vbasedev->name, info->index);
    g_free(info);
    return 0;

add_blocker:
    error_setg(&vbasedev->migration_blocker,
               "VFIO device doesn't support migration");
    g_free(info);

    ret = migrate_add_blocker(vbasedev->migration_blocker, errp);
    if (ret < 0) {
        error_free(vbasedev->migration_blocker);
        vbasedev->migration_blocker = NULL;
    }
    return ret;
}
void vfio_migration_finalize(VFIODevice *vbasedev)
{
    if (vbasedev->migration) {
        VFIOMigration *migration = vbasedev->migration;

        remove_migration_state_change_notifier(&migration->migration_state);
        qemu_del_vm_change_state_handler(migration->vm_state);
        unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
        vfio_migration_exit(vbasedev);
    }

    if (vbasedev->migration_blocker) {
        migrate_del_blocker(vbasedev->migration_blocker);
        error_free(vbasedev->migration_blocker);
        vbasedev->migration_blocker = NULL;
    }
}