/*
 * Migration support for VFIO devices
 *
 * Copyright NVIDIA, Inc. 2020
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
10 #include "qemu/osdep.h"
11 #include "qemu/main-loop.h"
12 #include "qemu/cutils.h"
13 #include <linux/vfio.h>
14 #include <sys/ioctl.h>
16 #include "sysemu/runstate.h"
17 #include "hw/vfio/vfio-common.h"
19 #include "migration/migration.h"
20 #include "migration/vmstate.h"
21 #include "migration/qemu-file.h"
22 #include "migration/register.h"
23 #include "migration/blocker.h"
24 #include "migration/misc.h"
25 #include "qapi/error.h"
26 #include "exec/ramlist.h"
27 #include "exec/ram_addr.h"
/*
 * Flags to be used as unique delimiters for VFIO devices in the migration
 * stream. These flags are composed as:
 * 0xffffffff => MSB 32-bit all 1s
 * 0xef10     => Magic ID, represents emulated (virtual) function IO
 * 0x0000     => 16-bits reserved for flags
 *
 * The beginning of state information is marked by _DEV_CONFIG_STATE,
 * _DEV_SETUP_STATE, or _DEV_DATA_STATE, respectively. The end of a
 * certain state information is marked by _END_OF_STATE.
 */
#define VFIO_MIG_FLAG_END_OF_STATE      (0xffffffffef100001ULL)
#define VFIO_MIG_FLAG_DEV_CONFIG_STATE  (0xffffffffef100002ULL)
#define VFIO_MIG_FLAG_DEV_SETUP_STATE   (0xffffffffef100003ULL)
#define VFIO_MIG_FLAG_DEV_DATA_STATE    (0xffffffffef100004ULL)

/* Total device-state bytes written to the migration stream so far. */
static int64_t bytes_transferred;
50 static inline int vfio_mig_access(VFIODevice
*vbasedev
, void *val
, int count
,
51 off_t off
, bool iswrite
)
55 ret
= iswrite
? pwrite(vbasedev
->fd
, val
, count
, off
) :
56 pread(vbasedev
->fd
, val
, count
, off
);
58 error_report("vfio_mig_%s %d byte %s: failed at offset 0x%"
59 HWADDR_PRIx
", err: %s", iswrite
? "write" : "read", count
,
60 vbasedev
->name
, off
, strerror(errno
));
61 return (ret
< 0) ? ret
: -EINVAL
;
66 static int vfio_mig_rw(VFIODevice
*vbasedev
, __u8
*buf
, size_t count
,
67 off_t off
, bool iswrite
)
75 if (count
>= 8 && !(off
% 8)) {
77 } else if (count
>= 4 && !(off
% 4)) {
79 } else if (count
>= 2 && !(off
% 2)) {
85 ret
= vfio_mig_access(vbasedev
, tbuf
, bytes
, off
, iswrite
);
98 #define vfio_mig_read(f, v, c, o) vfio_mig_rw(f, (__u8 *)v, c, o, false)
99 #define vfio_mig_write(f, v, c, o) vfio_mig_rw(f, (__u8 *)v, c, o, true)
101 #define VFIO_MIG_STRUCT_OFFSET(f) \
102 offsetof(struct vfio_device_migration_info, f)
104 * Change the device_state register for device @vbasedev. Bits set in @mask
105 * are preserved, bits set in @value are set, and bits not set in either @mask
106 * or @value are cleared in device_state. If the register cannot be accessed,
107 * the resulting state would be invalid, or the device enters an error state,
108 * an error is returned.
111 static int vfio_migration_set_state(VFIODevice
*vbasedev
, uint32_t mask
,
114 VFIOMigration
*migration
= vbasedev
->migration
;
115 VFIORegion
*region
= &migration
->region
;
116 off_t dev_state_off
= region
->fd_offset
+
117 VFIO_MIG_STRUCT_OFFSET(device_state
);
118 uint32_t device_state
;
121 ret
= vfio_mig_read(vbasedev
, &device_state
, sizeof(device_state
),
127 device_state
= (device_state
& mask
) | value
;
129 if (!VFIO_DEVICE_STATE_VALID(device_state
)) {
133 ret
= vfio_mig_write(vbasedev
, &device_state
, sizeof(device_state
),
138 rret
= vfio_mig_read(vbasedev
, &device_state
, sizeof(device_state
),
141 if ((rret
< 0) || (VFIO_DEVICE_STATE_IS_ERROR(device_state
))) {
142 hw_error("%s: Device in error state 0x%x", vbasedev
->name
,
144 return rret
? rret
: -EIO
;
149 migration
->device_state
= device_state
;
150 trace_vfio_migration_set_state(vbasedev
->name
, device_state
);
154 static void *get_data_section_size(VFIORegion
*region
, uint64_t data_offset
,
155 uint64_t data_size
, uint64_t *size
)
161 if (!region
->mmaps
) {
163 *size
= MIN(data_size
, region
->size
- data_offset
);
168 for (i
= 0; i
< region
->nr_mmaps
; i
++) {
169 VFIOMmap
*map
= region
->mmaps
+ i
;
171 if ((data_offset
>= map
->offset
) &&
172 (data_offset
< map
->offset
+ map
->size
)) {
174 /* check if data_offset is within sparse mmap areas */
175 ptr
= map
->mmap
+ data_offset
- map
->offset
;
177 *size
= MIN(data_size
, map
->offset
+ map
->size
- data_offset
);
180 } else if ((data_offset
< map
->offset
) &&
181 (!limit
|| limit
> map
->offset
)) {
183 * data_offset is not within sparse mmap areas, find size of
184 * non-mapped area. Check through all list since region->mmaps list
192 *size
= limit
? MIN(data_size
, limit
- data_offset
) : data_size
;
197 static int vfio_save_buffer(QEMUFile
*f
, VFIODevice
*vbasedev
, uint64_t *size
)
199 VFIOMigration
*migration
= vbasedev
->migration
;
200 VFIORegion
*region
= &migration
->region
;
201 uint64_t data_offset
= 0, data_size
= 0, sz
;
204 ret
= vfio_mig_read(vbasedev
, &data_offset
, sizeof(data_offset
),
205 region
->fd_offset
+ VFIO_MIG_STRUCT_OFFSET(data_offset
));
210 ret
= vfio_mig_read(vbasedev
, &data_size
, sizeof(data_size
),
211 region
->fd_offset
+ VFIO_MIG_STRUCT_OFFSET(data_size
));
216 trace_vfio_save_buffer(vbasedev
->name
, data_offset
, data_size
,
217 migration
->pending_bytes
);
219 qemu_put_be64(f
, data_size
);
225 bool buf_allocated
= false;
227 buf
= get_data_section_size(region
, data_offset
, sz
, &sec_size
);
230 buf
= g_try_malloc(sec_size
);
232 error_report("%s: Error allocating buffer ", __func__
);
235 buf_allocated
= true;
237 ret
= vfio_mig_read(vbasedev
, buf
, sec_size
,
238 region
->fd_offset
+ data_offset
);
245 qemu_put_buffer(f
, buf
, sec_size
);
251 data_offset
+= sec_size
;
254 ret
= qemu_file_get_error(f
);
260 bytes_transferred
+= data_size
;
264 static int vfio_load_buffer(QEMUFile
*f
, VFIODevice
*vbasedev
,
267 VFIORegion
*region
= &vbasedev
->migration
->region
;
268 uint64_t data_offset
= 0, size
, report_size
;
272 ret
= vfio_mig_read(vbasedev
, &data_offset
, sizeof(data_offset
),
273 region
->fd_offset
+ VFIO_MIG_STRUCT_OFFSET(data_offset
));
278 if (data_offset
+ data_size
> region
->size
) {
280 * If data_size is greater than the data section of migration region
281 * then iterate the write buffer operation. This case can occur if
282 * size of migration region at destination is smaller than size of
283 * migration region at source.
285 report_size
= size
= region
->size
- data_offset
;
288 report_size
= size
= data_size
;
292 trace_vfio_load_state_device_data(vbasedev
->name
, data_offset
, size
);
297 bool buf_alloc
= false;
299 buf
= get_data_section_size(region
, data_offset
, size
, &sec_size
);
302 buf
= g_try_malloc(sec_size
);
304 error_report("%s: Error allocating buffer ", __func__
);
310 qemu_get_buffer(f
, buf
, sec_size
);
313 ret
= vfio_mig_write(vbasedev
, buf
, sec_size
,
314 region
->fd_offset
+ data_offset
);
322 data_offset
+= sec_size
;
325 ret
= vfio_mig_write(vbasedev
, &report_size
, sizeof(report_size
),
326 region
->fd_offset
+ VFIO_MIG_STRUCT_OFFSET(data_size
));
335 static int vfio_update_pending(VFIODevice
*vbasedev
)
337 VFIOMigration
*migration
= vbasedev
->migration
;
338 VFIORegion
*region
= &migration
->region
;
339 uint64_t pending_bytes
= 0;
342 ret
= vfio_mig_read(vbasedev
, &pending_bytes
, sizeof(pending_bytes
),
343 region
->fd_offset
+ VFIO_MIG_STRUCT_OFFSET(pending_bytes
));
345 migration
->pending_bytes
= 0;
349 migration
->pending_bytes
= pending_bytes
;
350 trace_vfio_update_pending(vbasedev
->name
, pending_bytes
);
354 static int vfio_save_device_config_state(QEMUFile
*f
, void *opaque
)
356 VFIODevice
*vbasedev
= opaque
;
358 qemu_put_be64(f
, VFIO_MIG_FLAG_DEV_CONFIG_STATE
);
360 if (vbasedev
->ops
&& vbasedev
->ops
->vfio_save_config
) {
361 vbasedev
->ops
->vfio_save_config(vbasedev
, f
);
364 qemu_put_be64(f
, VFIO_MIG_FLAG_END_OF_STATE
);
366 trace_vfio_save_device_config_state(vbasedev
->name
);
368 return qemu_file_get_error(f
);
371 static int vfio_load_device_config_state(QEMUFile
*f
, void *opaque
)
373 VFIODevice
*vbasedev
= opaque
;
376 if (vbasedev
->ops
&& vbasedev
->ops
->vfio_load_config
) {
379 ret
= vbasedev
->ops
->vfio_load_config(vbasedev
, f
);
381 error_report("%s: Failed to load device config space",
387 data
= qemu_get_be64(f
);
388 if (data
!= VFIO_MIG_FLAG_END_OF_STATE
) {
389 error_report("%s: Failed loading device config space, "
390 "end flag incorrect 0x%"PRIx64
, vbasedev
->name
, data
);
394 trace_vfio_load_device_config_state(vbasedev
->name
);
395 return qemu_file_get_error(f
);
398 static void vfio_migration_cleanup(VFIODevice
*vbasedev
)
400 VFIOMigration
*migration
= vbasedev
->migration
;
402 if (migration
->region
.mmaps
) {
403 vfio_region_unmap(&migration
->region
);
407 /* ---------------------------------------------------------------------- */
409 static int vfio_save_setup(QEMUFile
*f
, void *opaque
)
411 VFIODevice
*vbasedev
= opaque
;
412 VFIOMigration
*migration
= vbasedev
->migration
;
415 trace_vfio_save_setup(vbasedev
->name
);
417 qemu_put_be64(f
, VFIO_MIG_FLAG_DEV_SETUP_STATE
);
419 if (migration
->region
.mmaps
) {
421 * Calling vfio_region_mmap() from migration thread. Memory API called
422 * from this function require locking the iothread when called from
423 * outside the main loop thread.
425 qemu_mutex_lock_iothread();
426 ret
= vfio_region_mmap(&migration
->region
);
427 qemu_mutex_unlock_iothread();
429 error_report("%s: Failed to mmap VFIO migration region: %s",
430 vbasedev
->name
, strerror(-ret
));
431 error_report("%s: Falling back to slow path", vbasedev
->name
);
435 ret
= vfio_migration_set_state(vbasedev
, VFIO_DEVICE_STATE_MASK
,
436 VFIO_DEVICE_STATE_SAVING
);
438 error_report("%s: Failed to set state SAVING", vbasedev
->name
);
442 qemu_put_be64(f
, VFIO_MIG_FLAG_END_OF_STATE
);
444 ret
= qemu_file_get_error(f
);
452 static void vfio_save_cleanup(void *opaque
)
454 VFIODevice
*vbasedev
= opaque
;
456 vfio_migration_cleanup(vbasedev
);
457 trace_vfio_save_cleanup(vbasedev
->name
);
460 static void vfio_save_pending(QEMUFile
*f
, void *opaque
,
461 uint64_t threshold_size
,
462 uint64_t *res_precopy_only
,
463 uint64_t *res_compatible
,
464 uint64_t *res_postcopy_only
)
466 VFIODevice
*vbasedev
= opaque
;
467 VFIOMigration
*migration
= vbasedev
->migration
;
470 ret
= vfio_update_pending(vbasedev
);
475 *res_precopy_only
+= migration
->pending_bytes
;
477 trace_vfio_save_pending(vbasedev
->name
, *res_precopy_only
,
478 *res_postcopy_only
, *res_compatible
);
481 static int vfio_save_iterate(QEMUFile
*f
, void *opaque
)
483 VFIODevice
*vbasedev
= opaque
;
484 VFIOMigration
*migration
= vbasedev
->migration
;
488 qemu_put_be64(f
, VFIO_MIG_FLAG_DEV_DATA_STATE
);
490 if (migration
->pending_bytes
== 0) {
491 ret
= vfio_update_pending(vbasedev
);
496 if (migration
->pending_bytes
== 0) {
498 qemu_put_be64(f
, VFIO_MIG_FLAG_END_OF_STATE
);
499 /* indicates data finished, goto complete phase */
504 ret
= vfio_save_buffer(f
, vbasedev
, &data_size
);
506 error_report("%s: vfio_save_buffer failed %s", vbasedev
->name
,
511 qemu_put_be64(f
, VFIO_MIG_FLAG_END_OF_STATE
);
513 ret
= qemu_file_get_error(f
);
519 * Reset pending_bytes as .save_live_pending is not called during savevm or
520 * snapshot case, in such case vfio_update_pending() at the start of this
521 * function updates pending_bytes.
523 migration
->pending_bytes
= 0;
524 trace_vfio_save_iterate(vbasedev
->name
, data_size
);
528 static int vfio_save_complete_precopy(QEMUFile
*f
, void *opaque
)
530 VFIODevice
*vbasedev
= opaque
;
531 VFIOMigration
*migration
= vbasedev
->migration
;
535 ret
= vfio_migration_set_state(vbasedev
, ~VFIO_DEVICE_STATE_RUNNING
,
536 VFIO_DEVICE_STATE_SAVING
);
538 error_report("%s: Failed to set state STOP and SAVING",
543 ret
= vfio_update_pending(vbasedev
);
548 while (migration
->pending_bytes
> 0) {
549 qemu_put_be64(f
, VFIO_MIG_FLAG_DEV_DATA_STATE
);
550 ret
= vfio_save_buffer(f
, vbasedev
, &data_size
);
552 error_report("%s: Failed to save buffer", vbasedev
->name
);
556 if (data_size
== 0) {
560 ret
= vfio_update_pending(vbasedev
);
566 qemu_put_be64(f
, VFIO_MIG_FLAG_END_OF_STATE
);
568 ret
= qemu_file_get_error(f
);
573 ret
= vfio_migration_set_state(vbasedev
, ~VFIO_DEVICE_STATE_SAVING
, 0);
575 error_report("%s: Failed to set state STOPPED", vbasedev
->name
);
579 trace_vfio_save_complete_precopy(vbasedev
->name
);
583 static void vfio_save_state(QEMUFile
*f
, void *opaque
)
585 VFIODevice
*vbasedev
= opaque
;
588 ret
= vfio_save_device_config_state(f
, opaque
);
590 error_report("%s: Failed to save device config space",
592 qemu_file_set_error(f
, ret
);
596 static int vfio_load_setup(QEMUFile
*f
, void *opaque
)
598 VFIODevice
*vbasedev
= opaque
;
599 VFIOMigration
*migration
= vbasedev
->migration
;
602 if (migration
->region
.mmaps
) {
603 ret
= vfio_region_mmap(&migration
->region
);
605 error_report("%s: Failed to mmap VFIO migration region %d: %s",
606 vbasedev
->name
, migration
->region
.nr
,
608 error_report("%s: Falling back to slow path", vbasedev
->name
);
612 ret
= vfio_migration_set_state(vbasedev
, ~VFIO_DEVICE_STATE_MASK
,
613 VFIO_DEVICE_STATE_RESUMING
);
615 error_report("%s: Failed to set state RESUMING", vbasedev
->name
);
616 if (migration
->region
.mmaps
) {
617 vfio_region_unmap(&migration
->region
);
623 static int vfio_load_cleanup(void *opaque
)
625 VFIODevice
*vbasedev
= opaque
;
627 vfio_migration_cleanup(vbasedev
);
628 trace_vfio_load_cleanup(vbasedev
->name
);
632 static int vfio_load_state(QEMUFile
*f
, void *opaque
, int version_id
)
634 VFIODevice
*vbasedev
= opaque
;
638 data
= qemu_get_be64(f
);
639 while (data
!= VFIO_MIG_FLAG_END_OF_STATE
) {
641 trace_vfio_load_state(vbasedev
->name
, data
);
644 case VFIO_MIG_FLAG_DEV_CONFIG_STATE
:
646 return vfio_load_device_config_state(f
, opaque
);
648 case VFIO_MIG_FLAG_DEV_SETUP_STATE
:
650 data
= qemu_get_be64(f
);
651 if (data
== VFIO_MIG_FLAG_END_OF_STATE
) {
654 error_report("%s: SETUP STATE: EOS not found 0x%"PRIx64
,
655 vbasedev
->name
, data
);
660 case VFIO_MIG_FLAG_DEV_DATA_STATE
:
662 uint64_t data_size
= qemu_get_be64(f
);
665 ret
= vfio_load_buffer(f
, vbasedev
, data_size
);
673 error_report("%s: Unknown tag 0x%"PRIx64
, vbasedev
->name
, data
);
677 data
= qemu_get_be64(f
);
678 ret
= qemu_file_get_error(f
);
686 static SaveVMHandlers savevm_vfio_handlers
= {
687 .save_setup
= vfio_save_setup
,
688 .save_cleanup
= vfio_save_cleanup
,
689 .save_live_pending
= vfio_save_pending
,
690 .save_live_iterate
= vfio_save_iterate
,
691 .save_live_complete_precopy
= vfio_save_complete_precopy
,
692 .save_state
= vfio_save_state
,
693 .load_setup
= vfio_load_setup
,
694 .load_cleanup
= vfio_load_cleanup
,
695 .load_state
= vfio_load_state
,
698 /* ---------------------------------------------------------------------- */
700 static void vfio_vmstate_change(void *opaque
, bool running
, RunState state
)
702 VFIODevice
*vbasedev
= opaque
;
703 VFIOMigration
*migration
= vbasedev
->migration
;
704 uint32_t value
, mask
;
707 if (vbasedev
->migration
->vm_running
== running
) {
713 * Here device state can have one of _SAVING, _RESUMING or _STOP bit.
714 * Transition from _SAVING to _RUNNING can happen if there is migration
715 * failure, in that case clear _SAVING bit.
716 * Transition from _RESUMING to _RUNNING occurs during resuming
717 * phase, in that case clear _RESUMING bit.
718 * In both the above cases, set _RUNNING bit.
720 mask
= ~VFIO_DEVICE_STATE_MASK
;
721 value
= VFIO_DEVICE_STATE_RUNNING
;
724 * Here device state could be either _RUNNING or _SAVING|_RUNNING. Reset
727 mask
= ~VFIO_DEVICE_STATE_RUNNING
;
731 ret
= vfio_migration_set_state(vbasedev
, mask
, value
);
734 * Migration should be aborted in this case, but vm_state_notify()
735 * currently does not support reporting failures.
737 error_report("%s: Failed to set device state 0x%x", vbasedev
->name
,
738 (migration
->device_state
& mask
) | value
);
739 qemu_file_set_error(migrate_get_current()->to_dst_file
, ret
);
741 vbasedev
->migration
->vm_running
= running
;
742 trace_vfio_vmstate_change(vbasedev
->name
, running
, RunState_str(state
),
743 (migration
->device_state
& mask
) | value
);
746 static void vfio_migration_state_notifier(Notifier
*notifier
, void *data
)
748 MigrationState
*s
= data
;
749 VFIOMigration
*migration
= container_of(notifier
, VFIOMigration
,
751 VFIODevice
*vbasedev
= migration
->vbasedev
;
754 trace_vfio_migration_state_notifier(vbasedev
->name
,
755 MigrationStatus_str(s
->state
));
758 case MIGRATION_STATUS_CANCELLING
:
759 case MIGRATION_STATUS_CANCELLED
:
760 case MIGRATION_STATUS_FAILED
:
761 bytes_transferred
= 0;
762 ret
= vfio_migration_set_state(vbasedev
,
763 ~(VFIO_DEVICE_STATE_SAVING
| VFIO_DEVICE_STATE_RESUMING
),
764 VFIO_DEVICE_STATE_RUNNING
);
766 error_report("%s: Failed to set state RUNNING", vbasedev
->name
);
771 static void vfio_migration_exit(VFIODevice
*vbasedev
)
773 VFIOMigration
*migration
= vbasedev
->migration
;
775 vfio_region_exit(&migration
->region
);
776 vfio_region_finalize(&migration
->region
);
777 g_free(vbasedev
->migration
);
778 vbasedev
->migration
= NULL
;
781 static int vfio_migration_init(VFIODevice
*vbasedev
,
782 struct vfio_region_info
*info
)
786 VFIOMigration
*migration
;
788 g_autofree
char *path
= NULL
, *oid
= NULL
;
790 if (!vbasedev
->ops
->vfio_get_object
) {
794 obj
= vbasedev
->ops
->vfio_get_object(vbasedev
);
799 vbasedev
->migration
= g_new0(VFIOMigration
, 1);
801 ret
= vfio_region_setup(obj
, vbasedev
, &vbasedev
->migration
->region
,
802 info
->index
, "migration");
804 error_report("%s: Failed to setup VFIO migration region %d: %s",
805 vbasedev
->name
, info
->index
, strerror(-ret
));
809 if (!vbasedev
->migration
->region
.size
) {
810 error_report("%s: Invalid zero-sized VFIO migration region %d",
811 vbasedev
->name
, info
->index
);
816 migration
= vbasedev
->migration
;
817 migration
->vbasedev
= vbasedev
;
819 oid
= vmstate_if_get_id(VMSTATE_IF(DEVICE(obj
)));
821 path
= g_strdup_printf("%s/vfio", oid
);
823 path
= g_strdup("vfio");
825 strpadcpy(id
, sizeof(id
), path
, '\0');
827 register_savevm_live(id
, VMSTATE_INSTANCE_ID_ANY
, 1, &savevm_vfio_handlers
,
830 migration
->vm_state
= qdev_add_vm_change_state_handler(vbasedev
->dev
,
833 migration
->migration_state
.notify
= vfio_migration_state_notifier
;
834 add_migration_state_change_notifier(&migration
->migration_state
);
838 vfio_migration_exit(vbasedev
);
842 /* ---------------------------------------------------------------------- */
844 int64_t vfio_mig_bytes_transferred(void)
846 return bytes_transferred
;
849 int vfio_migration_probe(VFIODevice
*vbasedev
, Error
**errp
)
851 VFIOContainer
*container
= vbasedev
->group
->container
;
852 struct vfio_region_info
*info
= NULL
;
853 Error
*local_err
= NULL
;
856 if (!vbasedev
->enable_migration
|| !container
->dirty_pages_supported
) {
860 ret
= vfio_get_dev_region_info(vbasedev
, VFIO_REGION_TYPE_MIGRATION
,
861 VFIO_REGION_SUBTYPE_MIGRATION
, &info
);
866 ret
= vfio_migration_init(vbasedev
, info
);
871 trace_vfio_migration_probe(vbasedev
->name
, info
->index
);
876 error_setg(&vbasedev
->migration_blocker
,
877 "VFIO device doesn't support migration");
880 ret
= migrate_add_blocker(vbasedev
->migration_blocker
, &local_err
);
882 error_propagate(errp
, local_err
);
883 error_free(vbasedev
->migration_blocker
);
884 vbasedev
->migration_blocker
= NULL
;
889 void vfio_migration_finalize(VFIODevice
*vbasedev
)
891 if (vbasedev
->migration
) {
892 VFIOMigration
*migration
= vbasedev
->migration
;
894 remove_migration_state_change_notifier(&migration
->migration_state
);
895 qemu_del_vm_change_state_handler(migration
->vm_state
);
896 vfio_migration_exit(vbasedev
);
899 if (vbasedev
->migration_blocker
) {
900 migrate_del_blocker(vbasedev
->migration_blocker
);
901 error_free(vbasedev
->migration_blocker
);
902 vbasedev
->migration_blocker
= NULL
;