/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert  <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */
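/*
 * For orientation, a sketch of how the pieces in this file are used on
 * the incoming (destination) side, in rough call order (see the
 * individual functions below for the details):
 *
 *   postcopy_ram_supported_by_host()  - probe the host when the capability
 *                                       is requested
 *   postcopy_ram_incoming_init()      - before any precopy data arrives
 *   postcopy_ram_prepare_discard()    - before the discard phase
 *   postcopy_ram_enable_notify()      - arm userfault + the fault thread
 *   postcopy_place_page{,_zero}()     - as each page arrives
 *   postcopy_ram_incoming_cleanup()   - once everything has been received
 */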
#include "qemu/osdep.h"

#include "qemu-common.h"
#include "exec/target_page.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "postcopy-ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"
/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12
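/*
 * (Sizing: each queued discard is a (start, length) pair of uint64_t,
 * i.e. 16 bytes, so 12 entries give 192 bytes of payload plus the
 * command header - hence "around ~200 bytes".)
 */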
struct PostcopyDiscardState {
    const char *ramblock_name;
    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};
/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in, the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>
static bool ufd_version_check(int ufd)
{
    struct uffdio_api api_struct;
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("postcopy_ram_supported_by_host: UFFDIO_API failed: %s",
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    if (getpagesize() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = api_struct.features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_report("Userfault on this host does not support huge pages");
            return false;
        }
    }

    return true;
}
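/*
 * Illustrative use of the handshake above (a sketch, not code called from
 * here): open a userfaultfd and verify the API before issuing any other
 * UFFDIO_* ioctls on it:
 *
 *   int ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
 *   if (ufd != -1) {
 *       bool usable = ufd_version_check(ufd);   // UFFDIO_API handshake
 *       ...
 *       close(ufd);
 *   }
 */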
/*
 * Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(const char *block_name, void *host_addr,
                                      ram_addr_t offset, ram_addr_t length,
                                      void *opaque)
{
    RAMBlock *rb = qemu_ram_block_by_name(block_name);
    size_t pagesize = qemu_ram_pagesize(rb);

    if (qemu_ram_is_shared(rb)) {
        error_report("Postcopy on shared RAM (%s) is not yet supported",
                     block_name);
        return 1;
    }

    if (length % pagesize) {
        error_report("Postcopy requires RAM blocks to be a page size multiple,"
                     " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                     "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }

    return 0;
}
/*
 * Note: This has the side effect of munlock'ing all of RAM, that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(void)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if (qemu_target_page_size() > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_version_check(ufd)) {
        goto out;
    }

    /* We don't support postcopy with shared RAM yet */
    if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        return false;
    }

    /*
     * We need to check that the ops we need are supported on anon memory
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}
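/*
 * Note the probe above registers and then unregisters a scratch page
 * because the ioctls the kernel reports back (reg_struct.ioctls) are
 * specific to the registered range, not global.  A minimal sketch of
 * that pattern, assuming a valid userfaultfd 'ufd' and a page-aligned
 * area 'area':
 *
 *   struct uffdio_register r;
 *   r.range.start = (uintptr_t)area;
 *   r.range.len = pagesize;
 *   r.mode = UFFDIO_REGISTER_MODE_MISSING;
 *   if (!ioctl(ufd, UFFDIO_REGISTER, &r)) {
 *       // r.ioctls now holds the UFFDIO_* ops valid for this range
 *   }
 */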
/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}
/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages.  It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}
/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}
/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64 = 1;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }

        /*
         * Tell the fault_thread to exit, it's an eventfd that should
         * currently be at 0, we're going to increment it to 1
         */
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }
        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);
    migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, mis->largest_page_size);
        mis->postcopy_tmp_page = NULL;
    }
    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}
/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THP'd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}
/*
 * Mark the given area of RAM as requiring notification to unwritten areas
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }

    return 0;
}
/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[0].revents = 0;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
        pfd[1].revents = 0;

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zd",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

        rb_offset &= ~(qemu_ram_pagesize(rb) - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset);

        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS)
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                      rb_offset, qemu_ram_pagesize(rb));
        } else {
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                      rb_offset, qemu_ram_pagesize(rb));
        }
    }
    trace_postcopy_ram_fault_thread_exit();

    return NULL;
}
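/*
 * End-to-end flow for a single fault, to show how this thread fits in
 * (the receiving half lives in the ram load code, not in this file):
 *
 *   vCPU touches a not-yet-received page
 *     -> kernel queues a uffd_msg and blocks the faulting thread
 *     -> this thread reads the msg and calls migrate_send_rp_req_pages()
 *     -> the source sends the requested page out of order
 *     -> the ram load thread receives it and calls postcopy_place_page(),
 *        whose UFFDIO_COPY both installs the page and wakes the blocked
 *        thread.
 */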
int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_version_check(mis->userfault_fd)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}
/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        size_t pagesize)
{
    struct uffdio_copy copy_struct;

    copy_struct.dst = (uint64_t)(uintptr_t)host;
    copy_struct.src = (uint64_t)(uintptr_t)from;
    copy_struct.len = pagesize;
    copy_struct.mode = 0;

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p (size: %zd)",
                     __func__, strerror(e), host, from, pagesize);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}
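/*
 * (The reason for staging into 'from' and copying with a single ioctl
 * rather than writing to 'host' directly: 'host' is still
 * userfault-registered, so touching it from here would itself fault,
 * and UFFDIO_COPY guarantees no thread can ever observe a
 * partially-filled page.  See postcopy_get_tmp_page() below for the
 * staging buffer.)
 */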
/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             size_t pagesize)
{
    trace_postcopy_place_page_zero(host);

    if (pagesize == getpagesize()) {
        struct uffdio_zeropage zero_struct;
        zero_struct.range.start = (uint64_t)(uintptr_t)host;
        zero_struct.range.len = getpagesize();
        zero_struct.mode = 0;

        if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
            int e = errno;
            error_report("%s: %s zero host: %p",
                         __func__, strerror(e), host);

            return -e;
        }
    } else {
        /* The kernel can't use UFFDIO_ZEROPAGE for hugepages */
        if (!mis->postcopy_tmp_zero_page) {
            mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size,
                                               PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS,
                                               -1, 0);
            if (mis->postcopy_tmp_zero_page == MAP_FAILED) {
                int e = errno;
                mis->postcopy_tmp_zero_page = NULL;
                error_report("%s: %s mapping large zero page",
                             __func__, strerror(e));
                return -e;
            }
            memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size);
        }
        return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page,
                                   pagesize);
    }

    return 0;
}
/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page
 * The same address is used repeatedly, postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, mis->largest_page_size,
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}
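/*
 * Typical use by the ram load code (an illustrative sketch; 'f' and
 * 'host_addr' are hypothetical locals of the caller):
 *
 *   void *tmp = postcopy_get_tmp_page(mis);
 *   qemu_get_buffer(f, (uint8_t *)tmp, pagesize);   // stage incoming data
 *   postcopy_place_page(mis, host_addr, tmp, pagesize);
 */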
#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    assert(0);
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                        size_t pagesize)
{
    assert(0);
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
                             size_t pagesize)
{
    assert(0);
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    assert(0);
    return NULL;
}

#endif
/* ------------------------------------------------------------------------- */

/**
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @offset: the bitmap offset of the named RAMBlock in the migration
 *   bitmap.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 unsigned long offset,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    res->ramblock_name = name;
    res->offset = offset;

    return res;
}
/**
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_size = qemu_target_page_size();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = start * tp_size;
    pds->length_list[pds->cur_entry] = length * tp_size;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}
/**
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages, frees the PDS
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file,
                                              pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}
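/*
 * Illustrative use of the PDS API by the source's dirty-bitmap walk
 * (a sketch; the block name and page ranges are made up):
 *
 *   PostcopyDiscardState *pds =
 *       postcopy_discard_send_init(ms, offset, "pc.ram");
 *   postcopy_discard_send_range(ms, pds, first_page, npages);
 *   ...more ranges...
 *   postcopy_discard_send_finish(ms, pds);   // flushes anything queued
 */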
/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

PostcopyState postcopy_state_get(void)
{
    return atomic_mb_read(&incoming_postcopy_state);
}

/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state)
{
    return atomic_xchg(&incoming_postcopy_state, new_state);
}
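/*
 * Since postcopy_state_set() returns the old state, callers can verify
 * they saw the transition they expected; an illustrative sketch (the
 * real checks live in the migration core):
 *
 *   PostcopyState old = postcopy_state_set(POSTCOPY_INCOMING_LISTENING);
 *   if (old != POSTCOPY_INCOMING_ADVISE && old != POSTCOPY_INCOMING_DISCARD) {
 *       error_report("Unexpected postcopy state %d", old);
 *   }
 */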