postcopy: Incoming initialisation
[qemu/ar7.git] / migration / postcopy-ram.c
blob8478bfd3b4fb8c00a5fa8c16ed135148d12032ae
1 /*
2 * Postcopy migration for RAM
4 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
6 * Authors:
7 * Dave Gilbert <dgilbert@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
15 * Postcopy is a migration technique where the execution flips from the
16 * source to the destination before all the data has been copied.
19 #include <glib.h>
20 #include <stdio.h>
21 #include <unistd.h>
23 #include "qemu-common.h"
24 #include "migration/migration.h"
25 #include "migration/postcopy-ram.h"
26 #include "sysemu/sysemu.h"
27 #include "qemu/error-report.h"
28 #include "trace.h"
30 /* Arbitrary limit on size of each discard command,
31 * keeps them around ~200 bytes
33 #define MAX_DISCARDS_PER_COMMAND 12
/*
 * Accumulates discard ranges for one RAMBlock so they can be sent to the
 * source in batched discard commands rather than one message per range.
 */
struct PostcopyDiscardState {
    const char *ramblock_name;
    uint64_t offset; /* Bitmap entry for the 1st bit of this RAMBlock */
    uint16_t cur_entry; /* Index of the next free slot in the lists below */
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords; /* Stats: total ranges queued */
    unsigned int nsentcmds;  /* Stats: total discard commands sent */
};
48 /* Postcopy needs to detect accesses to pages that haven't yet been copied
49 * across, and efficiently map new pages in, the techniques for doing this
50 * are target OS specific.
52 #if defined(__linux__)
54 #include <sys/mman.h>
55 #include <sys/ioctl.h>
56 #include <sys/syscall.h>
57 #include <sys/types.h>
58 #include <asm/types.h> /* for __u64 */
59 #endif
61 #if defined(__linux__) && defined(__NR_userfaultfd)
62 #include <linux/userfaultfd.h>
/*
 * Check that the kernel's userfaultfd speaks the protocol version we
 * expect and offers the register/unregister ioctls postcopy depends on.
 *
 * Returns: true if the fd is usable, false (with an error reported)
 * otherwise.
 */
static bool ufd_version_check(int ufd)
{
    struct uffdio_api api = { .api = UFFD_API, .features = 0 };
    uint64_t needed = (__u64)1 << _UFFDIO_REGISTER |
                      (__u64)1 << _UFFDIO_UNREGISTER;

    if (ioctl(ufd, UFFDIO_API, &api)) {
        error_report("postcopy_ram_supported_by_host: UFFDIO_API failed: %s",
                     strerror(errno));
        return false;
    }

    if ((api.ioctls & needed) != needed) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api.ioctls & needed));
        return false;
    }

    return true;
}
88 bool postcopy_ram_supported_by_host(void)
90 long pagesize = getpagesize();
91 int ufd = -1;
92 bool ret = false; /* Error unless we change it */
93 void *testarea = NULL;
94 struct uffdio_register reg_struct;
95 struct uffdio_range range_struct;
96 uint64_t feature_mask;
98 if ((1ul << qemu_target_page_bits()) > pagesize) {
99 error_report("Target page size bigger than host page size");
100 goto out;
103 ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
104 if (ufd == -1) {
105 error_report("%s: userfaultfd not available: %s", __func__,
106 strerror(errno));
107 goto out;
110 /* Version and features check */
111 if (!ufd_version_check(ufd)) {
112 goto out;
116 * We need to check that the ops we need are supported on anon memory
117 * To do that we need to register a chunk and see the flags that
118 * are returned.
120 testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
121 MAP_ANONYMOUS, -1, 0);
122 if (testarea == MAP_FAILED) {
123 error_report("%s: Failed to map test area: %s", __func__,
124 strerror(errno));
125 goto out;
127 g_assert(((size_t)testarea & (pagesize-1)) == 0);
129 reg_struct.range.start = (uintptr_t)testarea;
130 reg_struct.range.len = pagesize;
131 reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
133 if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
134 error_report("%s userfault register: %s", __func__, strerror(errno));
135 goto out;
138 range_struct.start = (uintptr_t)testarea;
139 range_struct.len = pagesize;
140 if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
141 error_report("%s userfault unregister: %s", __func__, strerror(errno));
142 goto out;
145 feature_mask = (__u64)1 << _UFFDIO_WAKE |
146 (__u64)1 << _UFFDIO_COPY |
147 (__u64)1 << _UFFDIO_ZEROPAGE;
148 if ((reg_struct.ioctls & feature_mask) != feature_mask) {
149 error_report("Missing userfault map features: %" PRIx64,
150 (uint64_t)(~reg_struct.ioctls & feature_mask));
151 goto out;
154 /* Success! */
155 ret = true;
156 out:
157 if (testarea) {
158 munmap(testarea, pagesize);
160 if (ufd != -1) {
161 close(ufd);
163 return ret;
167 * postcopy_ram_discard_range: Discard a range of memory.
168 * We can assume that if we've been called postcopy_ram_hosttest returned true.
170 * @mis: Current incoming migration state.
171 * @start, @length: range of memory to discard.
173 * returns: 0 on success.
175 int postcopy_ram_discard_range(MigrationIncomingState *mis, uint8_t *start,
176 size_t length)
178 trace_postcopy_ram_discard_range(start, length);
179 if (madvise(start, length, MADV_DONTNEED)) {
180 error_report("%s MADV_DONTNEED: %s", __func__, strerror(errno));
181 return -1;
184 return 0;
188 * Setup an area of RAM so that it *can* be used for postcopy later; this
189 * must be done right at the start prior to pre-copy.
190 * opaque should be the MIS.
192 static int init_range(const char *block_name, void *host_addr,
193 ram_addr_t offset, ram_addr_t length, void *opaque)
195 MigrationIncomingState *mis = opaque;
197 trace_postcopy_init_range(block_name, host_addr, offset, length);
200 * We need the whole of RAM to be truly empty for postcopy, so things
201 * like ROMs and any data tables built during init must be zero'd
202 * - we're going to get the copy from the source anyway.
203 * (Precopy will just overwrite this data, so doesn't need the discard)
205 if (postcopy_ram_discard_range(mis, host_addr, length)) {
206 return -1;
209 return 0;
213 * At the end of migration, undo the effects of init_range
214 * opaque should be the MIS.
216 static int cleanup_range(const char *block_name, void *host_addr,
217 ram_addr_t offset, ram_addr_t length, void *opaque)
219 MigrationIncomingState *mis = opaque;
220 struct uffdio_range range_struct;
221 trace_postcopy_cleanup_range(block_name, host_addr, offset, length);
224 * We turned off hugepage for the precopy stage with postcopy enabled
225 * we can turn it back on now.
227 #ifdef MADV_HUGEPAGE
228 if (madvise(host_addr, length, MADV_HUGEPAGE)) {
229 error_report("%s HUGEPAGE: %s", __func__, strerror(errno));
230 return -1;
232 #endif
235 * We can also turn off userfault now since we should have all the
236 * pages. It can be useful to leave it on to debug postcopy
237 * if you're not sure it's always getting every page.
239 range_struct.start = (uintptr_t)host_addr;
240 range_struct.len = length;
242 if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
243 error_report("%s: userfault unregister %s", __func__, strerror(errno));
245 return -1;
248 return 0;
252 * Initialise postcopy-ram, setting the RAM to a state where we can go into
253 * postcopy later; must be called prior to any precopy.
254 * called from arch_init's similarly named ram_postcopy_incoming_init
256 int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
258 if (qemu_ram_foreach_block(init_range, mis)) {
259 return -1;
262 return 0;
266 * At the end of a migration where postcopy_ram_incoming_init was called.
268 int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
270 /* TODO: Join the fault thread once we're sure it will exit */
271 if (qemu_ram_foreach_block(cleanup_range, mis)) {
272 return -1;
275 return 0;
278 #else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
{
    /* Postcopy requires Linux userfaultfd; report unsupported here. */
    error_report("%s: No OS support", __func__);
    return false;
}
286 int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
288 error_report("postcopy_ram_incoming_init: No OS support");
289 return -1;
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    /* Deliberately aborts: init already failed on hosts without support */
    assert(0);
    return -1;
}
int postcopy_ram_discard_range(MigrationIncomingState *mis, uint8_t *start,
                               size_t length)
{
    /* Deliberately aborts: postcopy can't be reached without OS support */
    assert(0);
    return -1;
}
304 #endif
306 /* ------------------------------------------------------------------------- */
309 * postcopy_discard_send_init: Called at the start of each RAMBlock before
310 * asking to discard individual ranges.
312 * @ms: The current migration state.
313 * @offset: the bitmap offset of the named RAMBlock in the migration
314 * bitmap.
315 * @name: RAMBlock that discards will operate on.
317 * returns: a new PDS.
319 PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
320 unsigned long offset,
321 const char *name)
323 PostcopyDiscardState *res = g_malloc0(sizeof(PostcopyDiscardState));
325 if (res) {
326 res->ramblock_name = name;
327 res->offset = offset;
330 return res;
334 * postcopy_discard_send_range: Called by the bitmap code for each chunk to
335 * discard. May send a discard message, may just leave it queued to
336 * be sent later.
338 * @ms: Current migration state.
339 * @pds: Structure initialised by postcopy_discard_send_init().
340 * @start,@length: a range of pages in the migration bitmap in the
341 * RAM block passed to postcopy_discard_send_init() (length=1 is one page)
343 void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
344 unsigned long start, unsigned long length)
346 size_t tp_bits = qemu_target_page_bits();
347 /* Convert to byte offsets within the RAM block */
348 pds->start_list[pds->cur_entry] = (start - pds->offset) << tp_bits;
349 pds->length_list[pds->cur_entry] = length << tp_bits;
350 trace_postcopy_discard_send_range(pds->ramblock_name, start, length);
351 pds->cur_entry++;
352 pds->nsentwords++;
354 if (pds->cur_entry == MAX_DISCARDS_PER_COMMAND) {
355 /* Full set, ship it! */
356 qemu_savevm_send_postcopy_ram_discard(ms->file, pds->ramblock_name,
357 pds->cur_entry,
358 pds->start_list,
359 pds->length_list);
360 pds->nsentcmds++;
361 pds->cur_entry = 0;
366 * postcopy_discard_send_finish: Called at the end of each RAMBlock by the
367 * bitmap code. Sends any outstanding discard messages, frees the PDS
369 * @ms: Current migration state.
370 * @pds: Structure initialised by postcopy_discard_send_init().
372 void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
374 /* Anything unsent? */
375 if (pds->cur_entry) {
376 qemu_savevm_send_postcopy_ram_discard(ms->file, pds->ramblock_name,
377 pds->cur_entry,
378 pds->start_list,
379 pds->length_list);
380 pds->nsentcmds++;
383 trace_postcopy_discard_send_finish(pds->ramblock_name, pds->nsentwords,
384 pds->nsentcmds);
386 g_free(pds);