/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */
#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"
/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};
static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;

    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}
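/*
 * Usage sketch (illustrative only; "mydev" and mydev_can_postcopy() are
 * hypothetical, not part of QEMU): a device can veto postcopy at probe
 * time by registering a NotifierWithReturn and filling *pnd->errp:
 *
 *     static int mydev_postcopy_notify(NotifierWithReturn *notifier,
 *                                      void *opaque)
 *     {
 *         struct PostcopyNotifyData *pnd = opaque;
 *
 *         if (pnd->reason == POSTCOPY_NOTIFY_PROBE && !mydev_can_postcopy()) {
 *             error_setg(pnd->errp, "mydev does not support postcopy");
 *             return -ENOTSUP;
 *         }
 *         return 0;
 *     }
 *
 *     static NotifierWithReturn mydev_postcopy_notifier = {
 *         .notify = mydev_postcopy_notify,
 *     };
 *
 *     postcopy_add_notifier(&mydev_postcopy_notifier);
 */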
/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>
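
/*
 * For reference, the userfaultfd protocol that the rest of this file drives,
 * reduced to a minimal sketch (error handling omitted; the real sequences,
 * with feature negotiation, are in the functions below):
 *
 *     int ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
 *
 *     struct uffdio_api api = { .api = UFFD_API };
 *     ioctl(ufd, UFFDIO_API, &api);          // one-time ABI handshake per fd
 *
 *     struct uffdio_register reg = {
 *         .range = { .start = (uintptr_t)addr, .len = len },
 *         .mode  = UFFDIO_REGISTER_MODE_MISSING,
 *     };
 *     ioctl(ufd, UFFDIO_REGISTER, &reg);     // report faults on missing pages
 *
 *     struct uffd_msg msg;
 *     read(ufd, &msg, sizeof(msg));          // one message per fault
 *
 *     struct uffdio_copy copy = {
 *         .dst = msg.arg.pagefault.address & ~(pagesize - 1),
 *         .src = (uintptr_t)buf,
 *         .len = pagesize,
 *     };
 *     ioctl(ufd, UFFDIO_COPY, &copy);        // atomically place the page and
 *                                            // wake the blocked faulter
 */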
/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd - should be checked before calling this function
 * @features: out parameter, set to uffdio_api.features as provided by the
 *            kernel on success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    /* if we are here __NR_userfaultfd should exist */
    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: syscall __NR_userfaultfd failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}
/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd, subsequent calls will lead to error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}
static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    /*
     * It's not possible to request UFFD_API twice per fd;
     * userfault fd features are persistent.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_report("%s failed", __func__);
            return false;
        }
    }

    /*
     * Request features even if asked_features is 0, because the kernel
     * expects UFFD_API before UFFDIO_REGISTER on each userfault file
     * descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_report("%s failed: features %" PRIu64, __func__,
                     asked_features);
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}
/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(const char *block_name, void *host_addr,
                                      ram_addr_t offset, ram_addr_t length,
                                      void *opaque)
{
    RAMBlock *rb = qemu_ram_block_by_name(block_name);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (qemu_ram_is_shared(rb)) {
        error_report("Postcopy on shared RAM (%s) is not yet supported",
                     block_name);
        return 1;
    }

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }

    return 0;
}
/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    Error *local_err = NULL;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, &local_err)) {
        error_report_err(local_err);
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}
/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}
/*
 * At the end of migration, undo the effects of init_range.
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages. It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}
/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}
/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }
        /* Let the fault thread quit */
        atomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}
/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}
/*
 * Mark the given area of RAM as requiring notification to unwritten areas.
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        RAMBlock *rb = qemu_ram_block_by_name(block_name);
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}
int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;

    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = client_addr & ~(pagesize - 1);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}
/*
 * Callback from shared fault handlers to ask for a page;
 * the page must be specified by a RAMBlock and an offset in that rb.
 * Note: Only for use by shared fault handlers (in fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    uint64_t aligned_rbo = rb_offset & ~(pagesize - 1);
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        /* We already have the page; just wake the faulter */
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                                   qemu_ram_get_idstr(rb),
                                                   rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    if (rb != mis->last_rb) {
        mis->last_rb = rb;
        migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                  aligned_rbo, pagesize);
    } else {
        /* Save some space */
        migrate_send_rp_req_pages(mis, NULL, aligned_rbo, pagesize);
    }
    return 0;
}
/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->fault_thread_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_event_fd which is
         * an eventfd
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            uint64_t tmp64 = 0;

            /* Consume the signal */
            if (read(mis->userfault_event_fd, &tmp64, 8) != 8) {
                /* Nothing obviously nicer than posting this error. */
                error_report("%s: read() failed", __func__);
            }

            if (atomic_read(&mis->fault_thread_quit)) {
                trace_postcopy_ram_fault_thread_quit();
                break;
            }
        }

        if (pfd[0].revents) {
            poll_result--;
            ret = read(mis->userfault_fd, &msg, sizeof(msg));
            if (ret != sizeof(msg)) {
                if (errno == EAGAIN) {
                    /*
                     * if a wake up happens on the other thread just after
                     * the poll, there is nothing to read.
                     */
                    continue;
                }
                if (ret < 0) {
                    error_report("%s: Failed to read full userfault "
                                 "message: %s",
                                 __func__, strerror(errno));
                    break;
                } else {
                    error_report("%s: Read %d bytes from userfaultfd "
                                 "expected %zd",
                                 __func__, ret, sizeof(msg));
                    break; /* Lost alignment, don't know what we'd read next */
                }
            }
            if (msg.event != UFFD_EVENT_PAGEFAULT) {
                error_report("%s: Read unexpected event %u from userfaultfd",
                             __func__, msg.event);
                continue; /* It's not a page fault, shouldn't happen */
            }

            rb = qemu_ram_block_from_host(
                     (void *)(uintptr_t)msg.arg.pagefault.address,
                     true, &rb_offset);
            if (!rb) {
                error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                             PRIx64, (uint64_t)msg.arg.pagefault.address);
                break;
            }

            rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
            trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                    qemu_ram_get_idstr(rb),
                                                    rb_offset);
            /*
             * Send the request to the source - we want to request one
             * of our host page sizes (which is >= TPS)
             */
            if (rb != mis->last_rb) {
                mis->last_rb = rb;
                migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                          rb_offset, qemu_ram_pagesize(rb));
            } else {
                /* Save some space */
                migrate_send_rp_req_pages(mis, NULL,
                                          rb_offset, qemu_ram_pagesize(rb));
            }
        }

        /* Now handle any requests from external processes on shared memory */
        /* TODO: May need to handle devices deregistering during postcopy */
        for (index = 2; index < pfd_len && poll_result; index++) {
            if (pfd[index].revents) {
                struct PostCopyFD *pcfd =
                    &g_array_index(mis->postcopy_remote_fds,
                                   struct PostCopyFD, index - 2);

                poll_result--;
                if (pfd[index].revents & POLLERR) {
                    error_report("%s: POLLERR on poll %zd fd=%d",
                                 __func__, index, pcfd->fd);
                    pfd[index].events = 0;
                    continue;
                }

                ret = read(pcfd->fd, &msg, sizeof(msg));
                if (ret != sizeof(msg)) {
                    if (errno == EAGAIN) {
                        /*
                         * if a wake up happens on the other thread just after
                         * the poll, there is nothing to read.
                         */
                        continue;
                    }
                    if (ret < 0) {
                        error_report("%s: Failed to read full userfault "
                                     "message: %s (shared) revents=%d",
                                     __func__, strerror(errno),
                                     pfd[index].revents);
                        /* TODO: Could just disable this sharer */
                        break;
                    } else {
                        error_report("%s: Read %d bytes from userfaultfd "
                                     "expected %zd (shared)",
                                     __func__, ret, sizeof(msg));
                        /* TODO: Could just disable this sharer */
                        break; /* Lost alignment, don't know what we'd read next */
                    }
                }
                if (msg.event != UFFD_EVENT_PAGEFAULT) {
                    error_report("%s: Read unexpected event %u "
                                 "from userfaultfd (shared)",
                                 __func__, msg.event);
                    continue; /* It's not a page fault, shouldn't happen */
                }
                /* Call the device handler registered with us */
                ret = pcfd->handler(pcfd, &msg);
                if (ret) {
                    error_report("%s: Failed to resolve shared fault on %zd/%s",
                                 __func__, index, pcfd->idstr);
                    /* TODO: Fail? Disable this sharer? */
                }
            }
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    g_free(pfd);
    return NULL;
}
int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_check_and_apply(mis->userfault_fd, mis)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_event_fd == -1) {
        error_report("%s: Opening userfault_event_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}
static int qemu_ufd_copy_ioctl(int userfault_fd, void *host_addr,
                               void *from_addr, uint64_t pagesize,
                               RAMBlock *rb)
{
    int ret;

    if (from_addr) {
        struct uffdio_copy copy_struct;
        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
        copy_struct.len = pagesize;
        copy_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
    } else {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
        zero_struct.range.len = pagesize;
        zero_struct.mode = 0;
        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
    }
    if (!ret) {
        ramblock_recv_bitmap_set_range(rb, host_addr,
                                       pagesize / qemu_target_page_size());
    }
    return ret;
}
int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
{
    int i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        int ret = cur->waker(cur, rb, offset);
        if (ret) {
            return ret;
        }
    }
    return 0;
}
/*
 * Place a host page (from) at (host) atomically;
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, from, pagesize, rb)) {
        int e = errno;

        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return postcopy_notify_shared_wake(rb,
                                       qemu_ram_block_host_offset(rb, host));
}
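
/*
 * Typical placement flow on the destination, as a sketch (illustrative
 * only; the real caller is the ram load code, and "incoming_buf" is
 * hypothetical):
 *
 *     void *tmp = postcopy_get_tmp_page(mis);
 *     memcpy(tmp, incoming_buf, qemu_ram_pagesize(rb));
 *     if (postcopy_place_page(mis, host_addr, tmp, rb)) {
 *         // page not placed; any vCPU faulting on it stays blocked,
 *         // so the caller has to fail the migration
 *     }
 */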
/*
 * Place a zero page at (host) atomically;
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    trace_postcopy_place_page_zero(host);

    /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
     * but it's not available for everything (e.g. hugetlbpages)
     */
    if (qemu_ram_is_uf_zeroable(rb)) {
        if (qemu_ufd_copy_ioctl(mis->userfault_fd, host, NULL, pagesize, rb)) {
            int e = errno;

            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
        return postcopy_notify_shared_wake(rb,
                                           qemu_ram_block_host_offset(rb,
                                                                      host));
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   rb);
    }
}
/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page.
 * The same address is used repeatedly, postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}
#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        RAMBlock *rb)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             RAMBlock *rb)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    assert(0);
    return -1;
}
#endif
/* ------------------------------------------------------------------------- */

void postcopy_fault_thread_notify(MigrationIncomingState *mis)
{
    uint64_t tmp64 = 1;

    /*
     * Wakeup the fault_thread. It's an eventfd that should currently
     * be at 0, we're going to increment it to 1
     */
    if (write(mis->userfault_event_fd, &tmp64, 8) != 8) {
        /* Not much we can do here, but may as well report it */
        error_report("%s: incrementing failed: %s", __func__,
                     strerror(errno));
    }
}
/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    res->ramblock_name = name;

    return res;
}
/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = start * tp_size;
    pds->length_list[pds->cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}
/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages, frees the PDS.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}
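
/*
 * These three discard calls are driven per-RAMBlock by the dirty-bitmap
 * walk on the source. A sketch of that shape (illustrative only; the real
 * loop lives in ram.c and next_dirty_run() is hypothetical):
 *
 *     PostcopyDiscardState *pds =
 *         postcopy_discard_send_init(ms, block->idstr);
 *     unsigned long start, length;
 *     while (next_dirty_run(block, &start, &length)) {
 *         postcopy_discard_send_range(ms, pds, start, length); // page units
 *     }
 *     postcopy_discard_send_finish(ms, pds); // flushes the queue, frees pds
 */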
/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}
/* Register a handler for external shared memory postcopy;
 * called on the destination.
 */
void postcopy_register_shared_ufd(struct PostCopyFD *pcfd)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds,
                                                  *pcfd);
}

/* Unregister a handler for external shared memory postcopy.
 */
void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd)
{
    guint i;
    MigrationIncomingState *mis = migration_incoming_get_current();
    GArray *pcrfds = mis->postcopy_remote_fds;

    for (i = 0; i < pcrfds->len; i++) {
        struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i);
        if (cur->fd == pcfd->fd) {
            mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i);
            return;
        }
    }
}
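
/*
 * Sketch of how an external shared-memory client (e.g. vhost-user) hooks
 * in (illustrative only; my_fault_handler and my_waker are hypothetical,
 * and the field names follow struct PostCopyFD in postcopy-ram.h):
 *
 *     struct PostCopyFD pcfd = {
 *         .fd      = client_uffd,      // userfaultfd received from client
 *         .idstr   = "my-client",
 *         .handler = my_fault_handler, // given each uffd_msg from the fd
 *         .waker   = my_waker,         // wakes the client after placement
 *     };
 *     postcopy_register_shared_ufd(&pcfd);
 *     ...
 *     postcopy_unregister_shared_ufd(&pcfd);
 */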