/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */
#include "qemu-common.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "sysemu/sysemu.h"
#include "sysemu/balloon.h"
#include "qemu/error-report.h"
#include "trace.h"
/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12
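/*
 * Each queued range costs a 64-bit start and a 64-bit length, so 12 entries
 * is roughly 192 bytes of payload per command before the RAMBlock name and
 * message header are added.
 */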
struct PostcopyDiscardState {
    const char *ramblock_name;
    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
    uint16_t cur_entry; /* Next free slot in start_list/length_list */
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};
/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and to efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd)
#include <linux/userfaultfd.h>
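
/*
 * userfaultfd gives us a file descriptor from which we can read page-fault
 * events for registered memory ranges and resolve them from userspace
 * (UFFDIO_COPY / UFFDIO_ZEROPAGE); everything in this #if block is built on
 * that mechanism.
 */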
static bool ufd_version_check(int ufd)
{
    struct uffdio_api api_struct;
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("postcopy_ram_supported_by_host: UFFDIO_API failed: %s",
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}
/*
 * Note: This has the side effect of munlock'ing all of RAM, that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(void)
{
    long pagesize = getpagesize();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;

    if ((1ul << qemu_target_page_bits()) > pagesize) {
        error_report("Target page size bigger than host page size");
        goto out;
    }

    ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: userfaultfd not available: %s", __func__,
                     strerror(errno));
        goto out;
    }

    /* Version and features check */
    if (!ufd_version_check(ufd)) {
        goto out;
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_report("%s: munlockall: %s", __func__, strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_report("%s: Failed to map test area: %s", __func__,
                     strerror(errno));
        goto out;
    }
    g_assert(((size_t)testarea & (pagesize - 1)) == 0);

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s userfault unregister: %s", __func__, strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_report("Missing userfault map features: %" PRIx64,
                     (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea && testarea != MAP_FAILED) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}
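
/*
 * This probe is self-contained; the caller (assumed to be the capability
 * setup path on the source) can run it before any migration starts so that
 * an unsupported host is reported early.
 */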
/*
 * postcopy_ram_discard_range: Discard a range of memory.
 * We can assume that if we've been called postcopy_ram_supported_by_host
 * returned true.
 *
 * @mis: Current incoming migration state.
 * @start, @length: range of memory to discard.
 *
 * returns: 0 on success.
 */
int postcopy_ram_discard_range(MigrationIncomingState *mis, uint8_t *start,
                               size_t length)
{
    trace_postcopy_ram_discard_range(start, length);
    if (madvise(start, length, MADV_DONTNEED)) {
        error_report("%s MADV_DONTNEED: %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}
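
/*
 * On anonymous memory MADV_DONTNEED drops the backing pages: a later access
 * either faults in a fresh zero page or, once the range is registered with
 * userfaultfd, raises a userfault that we can service with a page from the
 * source.
 */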
/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(const char *block_name, void *host_addr,
                      ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;

    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (postcopy_ram_discard_range(mis, host_addr, length)) {
        return -1;
    }

    return 0;
}
/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(const char *block_name, void *host_addr,
                         ram_addr_t offset, ram_addr_t length, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled
     * we can turn it back on now.
     */
    if (qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE)) {
        error_report("%s HUGEPAGE: %s", __func__, strerror(errno));
        return -1;
    }

    /*
     * We can also turn off userfault now since we should have all the
     * pages. It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}
/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * called from arch_init's similarly named ram_postcopy_incoming_init
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    if (qemu_ram_foreach_block(init_range, mis)) {
        return -1;
    }

    return 0;
}
/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->have_fault_thread) {
        uint64_t tmp64 = 1;

        if (qemu_ram_foreach_block(cleanup_range, mis)) {
            return -1;
        }

        /*
         * Tell the fault_thread to exit, it's an eventfd that should
         * currently be at 0, we're going to increment it to 1
         */
        if (write(mis->userfault_quit_fd, &tmp64, 8) == 8) {
            trace_postcopy_ram_incoming_cleanup_join();
            qemu_thread_join(&mis->fault_thread);
        } else {
            /* Not much we can do here, but may as well report it */
            error_report("%s: incrementing userfault_quit_fd: %s", __func__,
                         strerror(errno));
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_quit_fd);
        mis->have_fault_thread = false;
    }

    qemu_balloon_inhibit(false);

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point, we have a valid
             * VM state.
             */
        }
    }

    postcopy_state_set(POSTCOPY_INCOMING_END);
    migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);

    if (mis->postcopy_tmp_page) {
        munmap(mis->postcopy_tmp_page, getpagesize());
        mis->postcopy_tmp_page = NULL;
    }
    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}
/*
 * Disable huge pages on an area
 */
static int nhp_range(const char *block_name, void *host_addr,
                     ram_addr_t offset, ram_addr_t length, void *opaque)
{
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    if (qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE)) {
        error_report("%s: NOHUGEPAGE: %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}
/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however leaving it until after precopy means that most of the precopy
 * data is still THPd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (qemu_ram_foreach_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}
/*
 * Mark the given area of RAM as requiring notification on accesses to
 * unwritten areas.
 * Used as a callback on qemu_ram_foreach_block.
 *   host_addr: Base of area to mark
 *   offset: Offset in the whole ram arena
 *   length: Length of the section
 *   opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)host_addr;
    reg_struct.range.len = length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }

    return 0;
}
/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t hostpagesize = getpagesize();
    RAMBlock *rb = NULL;
    RAMBlock *last_rb = NULL; /* last RAMBlock we sent part of */

    trace_postcopy_ram_fault_thread_entry();
    qemu_sem_post(&mis->fault_thread_sem);

    while (true) {
        ram_addr_t rb_offset;
        ram_addr_t in_raspace;
        struct pollfd pfd[2];

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA,
         * however we can be told to quit via userfault_quit_fd which is
         * an eventfd
         */
        pfd[0].fd = mis->userfault_fd;
        pfd[0].events = POLLIN;
        pfd[0].revents = 0;
        pfd[1].fd = mis->userfault_quit_fd;
        pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
        pfd[1].revents = 0;

        if (poll(pfd, 2, -1 /* Wait forever */) == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (pfd[1].revents) {
            trace_postcopy_ram_fault_thread_quit();
            break;
        }

        ret = read(mis->userfault_fd, &msg, sizeof(msg));
        if (ret != sizeof(msg)) {
            if (errno == EAGAIN) {
                /*
                 * if a wake up happens on the other thread just after
                 * the poll, there is nothing to read.
                 */
                continue;
            }
            if (ret < 0) {
                error_report("%s: Failed to read full userfault message: %s",
                             __func__, strerror(errno));
                break;
            } else {
                error_report("%s: Read %d bytes from userfaultfd expected %zd",
                             __func__, ret, sizeof(msg));
                break; /* Lost alignment, don't know what we'd read next */
            }
        }
        if (msg.event != UFFD_EVENT_PAGEFAULT) {
            error_report("%s: Read unexpected event %u from userfaultfd",
                         __func__, msg.event);
            continue; /* It's not a page fault, shouldn't happen */
        }

        rb = qemu_ram_block_from_host(
                 (void *)(uintptr_t)msg.arg.pagefault.address,
                 true, &in_raspace, &rb_offset);
        if (!rb) {
            error_report("postcopy_ram_fault_thread: Fault outside guest: %"
                         PRIx64, (uint64_t)msg.arg.pagefault.address);
            break;
        }

        rb_offset &= ~(hostpagesize - 1);
        trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
                                                qemu_ram_get_idstr(rb),
                                                rb_offset);

        /*
         * Send the request to the source - we want to request one
         * of our host page sizes (which is >= TPS)
         */
        if (rb != last_rb) {
            last_rb = rb;
            migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
                                      rb_offset, hostpagesize);
        } else {
            /* Save some space */
            migrate_send_rp_req_pages(mis, NULL,
                                      rb_offset, hostpagesize);
        }
    }
    trace_postcopy_ram_fault_thread_exit();
    return NULL;
}
int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    /* Open the fd for the kernel to give us userfaults */
    mis->userfault_fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (mis->userfault_fd == -1) {
        error_report("%s: Failed to open userfault fd: %s", __func__,
                     strerror(errno));
        return -1;
    }

    /*
     * Although the host check already tested the API, we need to
     * do the check again as an ABI handshake on the new fd.
     */
    if (!ufd_version_check(mis->userfault_fd)) {
        return -1;
    }

    /* Now an eventfd we use to tell the fault-thread to quit */
    mis->userfault_quit_fd = eventfd(0, EFD_CLOEXEC);
    if (mis->userfault_quit_fd == -1) {
        error_report("%s: Opening userfault_quit_fd: %s", __func__,
                     strerror(errno));
        close(mis->userfault_fd);
        return -1;
    }

    qemu_sem_init(&mis->fault_thread_sem, 0);
    qemu_thread_create(&mis->fault_thread, "postcopy/fault",
                       postcopy_ram_fault_thread, mis, QEMU_THREAD_JOINABLE);
    qemu_sem_wait(&mis->fault_thread_sem);
    qemu_sem_destroy(&mis->fault_thread_sem);
    mis->have_fault_thread = true;

    /* Mark so that we get notified of accesses to unwritten areas */
    if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
        return -1;
    }

    /*
     * Ballooning can mark pages as absent while we're postcopying
     * that would cause false userfaults.
     */
    qemu_balloon_inhibit(true);

    trace_postcopy_ram_enable_notify();

    return 0;
}
/*
 * Place a host page (from) at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from)
{
    struct uffdio_copy copy_struct;

    copy_struct.dst = (uint64_t)(uintptr_t)host;
    copy_struct.src = (uint64_t)(uintptr_t)from;
    copy_struct.len = getpagesize();
    copy_struct.mode = 0;

    /* copy also acks to the kernel waking the stalled thread up
     * TODO: We can inhibit that ack and only do it if it was requested
     * which would be slightly cheaper, but we'd have to be careful
     * of the order of updating our page state.
     */
    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
        int e = errno;
        error_report("%s: %s copy host: %p from: %p",
                     __func__, strerror(e), host, from);

        return -e;
    }

    trace_postcopy_place_page(host);
    return 0;
}
/*
 * Place a zero page at (host) atomically
 * returns 0 on success
 */
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host)
{
    struct uffdio_zeropage zero_struct;

    zero_struct.range.start = (uint64_t)(uintptr_t)host;
    zero_struct.range.len = getpagesize();
    zero_struct.mode = 0;

    if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
        int e = errno;
        error_report("%s: %s zero host: %p",
                     __func__, strerror(e), host);

        return -e;
    }

    trace_postcopy_place_page_zero(host);
    return 0;
}
/*
 * Returns a target page of memory that can be mapped at a later point in time
 * using postcopy_place_page
 * The same address is used repeatedly, postcopy_place_page just takes the
 * backing page away.
 * Returns: Pointer to allocated page
 */
void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    if (!mis->postcopy_tmp_page) {
        mis->postcopy_tmp_page = mmap(NULL, getpagesize(),
                                      PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                      MAP_ANONYMOUS, -1, 0);
        if (mis->postcopy_tmp_page == MAP_FAILED) {
            /* mmap reports failure as MAP_FAILED, not NULL */
            mis->postcopy_tmp_page = NULL;
            error_report("%s: %s", __func__, strerror(errno));
            return NULL;
        }
    }

    return mis->postcopy_tmp_page;
}
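
/*
 * Illustrative receive-side flow (a sketch only; the real caller is the RAM
 * load code and the variable names here are hypothetical):
 *
 *   void *tmp = postcopy_get_tmp_page(mis);
 *   ...read the incoming page data from the stream into tmp...
 *   if (page_is_all_zero) {
 *       postcopy_place_page_zero(mis, host_addr);
 *   } else {
 *       postcopy_place_page(mis, host_addr, tmp);
 *   }
 */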
#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
{
    error_report("%s: No OS support", __func__);
    return false;
}

int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
{
    error_report("postcopy_ram_incoming_init: No OS support");
    return -1;
}

int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    return -1;
}

int postcopy_ram_discard_range(MigrationIncomingState *mis, uint8_t *start,
                               size_t length)
{
    return -1;
}

int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    return -1;
}

int postcopy_ram_enable_notify(MigrationIncomingState *mis)
{
    return -1;
}

int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from)
{
    return -1;
}

int postcopy_place_page_zero(MigrationIncomingState *mis, void *host)
{
    return -1;
}

void *postcopy_get_tmp_page(MigrationIncomingState *mis)
{
    return NULL;
}

#endif

/* ------------------------------------------------------------------------- */
/*
 * postcopy_discard_send_init: Called at the start of each RAMBlock before
 *   asking to discard individual ranges.
 *
 * @ms: The current migration state.
 * @offset: the bitmap offset of the named RAMBlock in the migration
 *   bitmap.
 * @name: RAMBlock that discards will operate on.
 *
 * returns: a new PDS.
 */
PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
                                                 unsigned long offset,
                                                 const char *name)
{
    PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));

    res->ramblock_name = name;
    res->offset = offset;

    return res;
}
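
/*
 * Typical call sequence from the bitmap-walking code on the source (an
 * illustrative sketch; the loop structure and variable names here are
 * hypothetical):
 *
 *   PostcopyDiscardState *pds =
 *       postcopy_discard_send_init(ms, block_bitmap_offset, block_name);
 *   for (each run of pages to discard in this RAMBlock) {
 *       postcopy_discard_send_range(ms, pds, run_start, run_length);
 *   }
 *   postcopy_discard_send_finish(ms, pds);
 */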
/*
 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
 *   discard. May send a discard message, may just leave it queued to
 *   be sent later.
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 * @start,@length: a range of pages in the migration bitmap in the
 *   RAM block passed to postcopy_discard_send_init() (length=1 is one page)
 */
void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
                                 unsigned long start, unsigned long length)
{
    size_t tp_bits = qemu_target_page_bits();
    /* Convert to byte offsets within the RAM block */
    pds->start_list[pds->cur_entry] = (start - pds->offset) << tp_bits;
    pds->length_list[pds->cur_entry] = length << tp_bits;
    trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
    pds->cur_entry++;
    pds->nsentwords++;

    if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
        /* Full set, ship it! */
        qemu_savevm_send_postcopy_ram_discard(ms->file, pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
        pds->cur_entry = 0;
    }
}
/*
 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
 *   bitmap code. Sends any outstanding discard messages, frees the PDS
 *
 * @ms: Current migration state.
 * @pds: Structure initialised by postcopy_discard_send_init().
 */
void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
{
    /* Anything unsent? */
    if (pds->cur_entry) {
        qemu_savevm_send_postcopy_ram_discard(ms->file, pds->ramblock_name,
                                              pds->cur_entry,
                                              pds->start_list,
                                              pds->length_list);
        pds->nsentcmds++;
    }

    trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
                                       pds->nsentcmds);

    g_free(pds);
}
,