/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */
#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "qemu/madvise.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"
#include "exec/ramblock.h"
/*
 * Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12
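
/*
 * Rough arithmetic behind the figure above: each queued entry carries a
 * 64-bit start and a 64-bit length, so 12 entries take 12 * 16 = 192 bytes
 * of payload before the command header is added.
 */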
struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};
static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}
void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}
int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}
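
/*
 * A minimal sketch of the client side of this notifier API (hypothetical
 * example, not code from this file): a device that must veto or prepare for
 * postcopy registers a NotifierWithReturn whose callback inspects the
 * PostcopyNotifyData it is handed:
 *
 *     static int my_postcopy_notifier(NotifierWithReturn *n, void *data)
 *     {
 *         struct PostcopyNotifyData *pnd = data;
 *         if (pnd->reason == POSTCOPY_NOTIFY_PROBE && !my_device_ready()) {
 *             error_setg(pnd->errp, "my-device: postcopy not supported");
 *             return -ENOENT;
 *         }
 *         return 0;
 *     }
 *
 *     my_notifier.notify = my_postcopy_notifier;
 *     postcopy_add_notifier(&my_notifier);
 *
 * my_postcopy_notifier, my_device_ready and my_notifier are invented names;
 * a non-zero return aborts the notification walk, which is what
 * postcopy_notify()'s callers rely on.
 */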
/*
 * Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>
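
/*
 * Summary of the userfaultfd flow used throughout this file:
 *   1. open the fd:           ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
 *   2. handshake features:    ioctl(ufd, UFFDIO_API, &api_struct);
 *   3. watch a memory range:  ioctl(ufd, UFFDIO_REGISTER, &reg_struct);
 *   4. read fault events:     read(ufd, &msg, sizeof(msg));
 *   5. resolve a fault:       ioctl(ufd, UFFDIO_COPY/UFFDIO_ZEROPAGE, ...);
 * Steps 1-2 live in the probe and setup paths below, step 3 in
 * ram_block_enable_notify(), and steps 4-5 in postcopy_ram_fault_thread()
 * and qemu_ufd_copy_ioctl().
 */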
typedef struct PostcopyBlocktimeContext {
    /* time when page fault initiated per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* page address per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs that are suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for exit event, necessary for
     * releasing whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;
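
/*
 * Lifetime note: this context is allocated lazily in ufd_check_and_apply()
 * when the postcopy-blocktime capability is enabled, and released via the
 * exit notifier (migration_exit_cb) below.
 */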
static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}
static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}
static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}
static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint32List *list = NULL;
    int i;

    for (i = ms->smp.cpus - 1; i >= 0; i--) {
        QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
    }

    return list;
}
/*
 * This function just populates MigrationInfo from postcopy's
 * blocktime context. It will not populate MigrationInfo
 * unless the postcopy-blocktime capability was set.
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}
static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}
/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd - should be checked before
 * @features: out parameter will contain uffdio_api.features provided by kernel
 *              in case of success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}
/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd, subsequent calls will lead to error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}
static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice per fd, and the
     * userfault fd features are persistent.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (UFFD_FEATURE_THREAD_ID & supported_features) {
        asked_features |= UFFD_FEATURE_THREAD_ID;
        if (migrate_postcopy_blocktime()) {
            if (!mis->blocktime_ctx) {
                mis->blocktime_ctx = blocktime_context_new();
            }
        }
    }
#endif

    /*
     * Request features, even if asked_features is 0, because the kernel
     * expects UFFD_API before UFFDIO_REGISTER, per userfault file descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (qemu_real_host_page_size != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}
/*
 * Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }
    return 0;
}
/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = qemu_real_host_page_size;
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (foreach_not_ignored_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}
/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * Save the used_length before running the guest. In case we have to
     * resize RAM blocks when syncing RAM block sizes from the source during
     * precopy, we'll update it manually via the ram block notifier.
     */
    rb->postcopy_length = length;

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}
/*
 * At the end of migration, undo the effects of init_range.
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages. It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}
/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}
static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
{
    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }

    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
}
/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        qatomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (foreach_not_ignored_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_temp_pages_cleanup(mis);

    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
/*
 * Disable huge pages on an area
 */
static int nhp_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}
/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}
/*
 * Mark the given area of RAM as requiring notification of accesses to
 * unwritten areas. Used as a callback on foreach_not_ignored_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = rb->postcopy_length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}
int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = ROUND_DOWN(client_addr, pagesize);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}
static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
                                 ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));

    /*
     * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
     * access, place a zeropage, which will also set the relevant bits in the
     * recv_bitmap accordingly, so we won't try placing a zeropage twice.
     *
     * Checking a single bit is sufficient to handle pagesize > TPS as either
     * all relevant bits are set or not.
     */
    assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
    if (ramblock_page_is_discarded(rb, start)) {
        bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);

        return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
    }

    return migrate_send_rp_req_pages(mis, rb, start, haddr);
}
/*
 * Callback from shared fault handlers to ask for a page;
 * the page must be specified by a RAMBlock and an offset in that rb.
 * Note: Only for use by shared fault handlers (in fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                        qemu_ram_get_idstr(rb), rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    postcopy_request_page(mis, rb, aligned_rbo, client_addr);
    return 0;
}
static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);

    return -1;
}
static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}
/*
 * This function is called when a page fault occurs. It
 * tracks down the vCPU blocking time.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        qatomic_inc(&dc->smp_cpus_down);
    }

    qatomic_xchg(&dc->last_begin, low_time_offset);
    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    qatomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here, not at the beginning of the function, because the
     * check could occur earlier than bitmap_set in qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        qatomic_xchg(&dc->vcpu_addr[cpu], 0);
        qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        qatomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}
/*
 * This function just provides the calculated blocktime per cpu and traces it.
 * Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 * Assume we have 3 CPUs:
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match the condition because the sequence S1,S2,E1 doesn't
 *         include CPU3
 * S3,S1,E2 - sequence includes all CPUs, in this case the overlap will be
 *            S1,E2 - it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look the CPU up to clear it. This algorithm looks straightforward
     * but it's not optimal; a better algorithm would keep a tree or hash
     * keyed by address.
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        qatomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /*
         * We need to know whether this mark_postcopy_blocktime_end was for
         * a faulted page; the other possible case is a prefetched page, and
         * in that case we shouldn't be here.
         */
        if (!vcpu_total_blocktime &&
            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue the cycle, because one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}
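
/*
 * Worked example of the accounting above (illustrative numbers only): on a
 * 2-vCPU guest, if both vCPUs fault on the same page, vCPU0 at t=10 and
 * vCPU1 at t=12, and the page arrives at t=20, then vcpu_blocktime[0] grows
 * by 10 and vcpu_blocktime[1] by 8, while total_blocktime grows by
 * 20 - last_begin(12) = 8: the interval during which every vCPU was blocked.
 */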
static bool postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();
    qemu_sem_wait(&mis->postcopy_pause_sem_fault);
    trace_postcopy_pause_fault_thread_continued();
    return true;
}
/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->fault_thread_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (!mis->to_src_file) {
            /*
             * Possibly someone tells us that the return path is
             * broken already using the event. We should hold until
             * the channel is rebuilt.
             */
            if (postcopy_pause_fault_thread(mis)) {
                /* Continue to read the userfaultfd */
            } else {
                error_report("%s: paused but don't allow to continue",
                             __func__);
                break;
            }
        }

        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (qatomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * if a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zd",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %ud from userfaultfd",
                             __func__, msg.event);
                continue; /* It's not a page fault, shouldn't happen */
            }

            rb = qemu_ram_block_from_host(
                     (void *)(uintptr_t)msg.arg.pagefault.address,
                     true, &rb_offset);
            if (!rb) {
                error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                             PRIx64, (uint64_t)msg.arg.pagefault.address);
                break;
            }

            rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
            trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                    qemu_ram_get_idstr(rb),
                                                    rb_offset,
                                                    msg.arg.pagefault.feat.ptid);
            mark_postcopy_blocktime_begin(
                    (uintptr_t)(msg.arg.pagefault.address),
                    msg.arg.pagefault.feat.ptid, rb);

retry:
            /*
             * Send the request to the source - we want to request one
             * of our host page sizes (which is >= TPS)
             */
            ret = postcopy_request_page(mis, rb, rb_offset,
                                        msg.arg.pagefault.address);
            if (ret) {
                /* May be network failure, try to wait for recovery */
                if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
                    /* We got reconnected somehow, try to continue */
                    goto retry;
                } else {
                    /* This is an unavoidable fault */
                    error_report("%s: postcopy_request_page() get %d",
                                 __func__, ret);
                    break;
                }
            }
        }

        /* Now handle any requests from external processes on shared memory */
        /* TODO: May need to handle devices deregistering during postcopy */
        for (index = 2; index < pfd_len && poll_result; index++) {
            if (pfd[index].revents) {
                struct PostCopyFD *pcfd =
                    &g_array_index(mis->postcopy_remote_fds,
                                   struct PostCopyFD, index - 2);

                poll_result--;
                if (pfd[index].revents & POLLERR) {
                    error_report("%s: POLLERR on poll %zd fd=%d",
                                 __func__, index, pcfd->fd);
                    pfd[index].events = 0;
                    continue;
                }

                ret = read(pcfd->fd, &msg, sizeof(msg));
                if (ret != sizeof(msg)) {
                    if (errno == EAGAIN) {
                        /*
                         * if a wake up happens on the other thread just after
                         * the poll, there is nothing to read.
                         */
                        continue;
                    }
                    if (ret < 0) {
                        error_report("%s: Failed to read full userfault "
                                     "message: %s (shared) revents=%d",
                                     __func__, strerror(errno),
                                     pfd[index].revents);
                        /* TODO: Could just disable this sharer */
                        break;
                    } else {
                        error_report("%s: Read %d bytes from userfaultfd "
                                     "expected %zd (shared)",
                                     __func__, ret, sizeof(msg));
                        /* TODO: Could just disable this sharer */
                        break; /* Lost alignment, don't know what we'd read next */
                    }
                }
                if (msg.event != UFFD_EVENT_PAGEFAULT) {
                    error_report("%s: Read unexpected event %ud "
                                 "from userfaultfd (shared)",
                                 __func__, msg.event);
                    continue; /* It's not a page fault, shouldn't happen */
                }
                /* Call the device handler registered with us */
                ret = pcfd->handler(pcfd, &msg);
                if (ret) {
                    error_report("%s: Failed to resolve shared fault on %zd/%s",
                                 __func__, index, pcfd->idstr);
                    /* TODO: Fail? Disable this sharer? */
                }
            }
        }
    }
    rcu_unregister_thread();
    trace_postcopy_ram_fault_thread_exit();
    g_free(pfd);
    return NULL;
}
static int postcopy_temp_pages_setup(MigrationIncomingState *mis)
{
    int err;

    mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                  PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mis->postcopy_tmp_page == MAP_FAILED) {
        err = errno;
        mis->postcopy_tmp_page = NULL;
        error_report("%s: Failed to map postcopy_tmp_page %s",
                     __func__, strerror(err));
        return -err;
    }

    /*
     * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages
     */
    mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                       PROT_READ | PROT_WRITE,
                                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
        err = errno;
        mis->postcopy_tmp_zero_page = NULL;
        error_report("%s: Failed to map large zero page %s",
                     __func__, strerror(err));
        return -err;
    }

    memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);

    return 0;
}
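
/*
 * Why staging pages at all (editorial note): incoming page data can't simply
 * be written straight into the userfault-registered guest RAM, so each host
 * page is received into one of these temporary buffers first and then
 * installed atomically with UFFDIO_COPY (see postcopy_place_page() below).
 */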
int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_event_fd == -1) {
        error_report("%s: Opening userfault_event_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (foreach_not_ignored_block(ram_block_enable_notify, mis)) {
        error_report("ram_block_enable_notify failed");
        return -1;
    }

    if (postcopy_temp_pages_setup(mis)) {
        /* Error dumped in the sub-function */
        return -1;
    }

    trace_postcopy_ram_enable_notify();

    return 0;
}
static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
                               void *from_addr, uint64_t pagesize, RAMBlock *rb)
{
    int userfault_fd = mis->userfault_fd;
    int ret;

    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        qemu_mutex_lock(&mis->page_request_mutex);
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
        /*
         * If this page resolves a page fault for a previous recorded faulted
         * address, take a special note to maintain the requested page list.
         */
        if (g_tree_lookup(mis->page_requested, host_addr)) {
            g_tree_remove(mis->page_requested, host_addr);
            mis->page_requested_count--;
            trace_postcopy_page_req_del(host_addr, mis->page_requested_count);
        }
        qemu_mutex_unlock(&mis->page_request_mutex);
        mark_postcopy_blocktime_end((uintptr_t)host_addr);
    }
    return ret;
}
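
/*
 * Note on atomicity: UFFDIO_COPY and UFFDIO_ZEROPAGE install the full page
 * and wake any threads stalled on the range in a single kernel operation,
 * so a faulting vCPU can never observe a partially written page.
 */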
int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
{
    int i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        int ret = cur->waker(cur, rb, offset);
        if (ret) {
            return ret;
        }
    }
    return 0;
}
/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /*
     * copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return postcopy_notify_shared_wake(rb,
                                       qemu_ram_block_host_offset(rb, host));
}
/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    trace_postcopy_place_page_zero(host);

    /*
     * Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE,
     * but it's not available for everything (e.g. hugetlbpages)
     */
    if (qemu_ram_is_uf_zeroable(rb)) {
        if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
        return postcopy_notify_shared_wake(rb,
                                           qemu_ram_block_host_offset(rb,
                                                                      host));
    } else {
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb);
    }
}
#else
/* No target OS support, stubs just fail */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
}

bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    assert(0);
    return -1;
}

int postcopy_ram_incoming_setup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    assert(0);
    return -1;
}
#endif
/* ------------------------------------------------------------------------- */

void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{
    uint64_t tmp64 = 1;

    /*
     * Wakeup the fault_thread. It's an eventfd that should currently
     * be at 0, we're going to increment it to 1
     */
    if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
        /* Not much we can do here, but may as well report it */
        error_report("%s: incrementing failed: %s", __func__,
                     strerror(errno));
    }
}
/*
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 */
static PostcopyDiscardState pds = {0};
void postcopy_discard_send_init(MigrationState *ms, const char *name)
{
    pds.ramblock_name = name;
    pds.cur_entry = 0;
    pds.nsentwords = 0;
    pds.nsentcmds = 0;
}
/*
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, unsigned long start,
                                 unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds.start_list[pds.cur_entry] = start * tp_size;
    pds.length_list[pds.cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds.ramblock_name, start, length);
    pds.cur_entry++;
    pds.nsentwords++;

    if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
        pds.cur_entry = 0;
    }
}
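
/*
 * Worked example of the conversion above (illustrative numbers, assuming a
 * 4KiB target page): start=3, length=2 queues a discard of 0x2000 bytes at
 * byte offset 0x3000 within the RAM block.
 */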
/*
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages.
 *
 * @ms: Current migration state.
 */
void postcopy_discard_send_finish(MigrationState *ms)
{
    /* Anything unsent? */
    if (pds.cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds.ramblock_name,
                                              pds.cur_entry,
                                              pds.start_list,
                                              pds.length_list);
        pds.nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords,
                                       pds.nsentcmds);
}
/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return qatomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return qatomic_xchg(&incoming_postcopy_state, new_state);
}
/*
 * Register a handler for external shared memory postcopy;
 * called on the destination.
 */
void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
                                                  *pcfd);
}
/*
 * Unregister a handler for external shared memory postcopy.
 */
void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
{
    guint i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    if (!pcrfds) {
        /* migration has already finished and freed the array */
        return;
    }
    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        if (cur->fd == pcfd->fd) {
            mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
            return;
        }
    }
}