1 /*
2 * QEMU dump
4 * Copyright Fujitsu, Corp. 2011, 2012
6 * Authors:
7 * Wen Congyang <wency@cn.fujitsu.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
15 #include "qemu/cutils.h"
16 #include "elf.h"
17 #include "cpu.h"
18 #include "exec/hwaddr.h"
19 #include "monitor/monitor.h"
20 #include "sysemu/kvm.h"
21 #include "sysemu/dump.h"
22 #include "sysemu/sysemu.h"
23 #include "sysemu/memory_mapping.h"
24 #include "sysemu/cpus.h"
25 #include "qapi/qmp/qerror.h"
26 #include "qmp-commands.h"
27 #include "qapi-event.h"
29 #include <zlib.h>
30 #ifdef CONFIG_LZO
31 #include <lzo/lzo1x.h>
32 #endif
33 #ifdef CONFIG_SNAPPY
34 #include <snappy-c.h>
35 #endif
36 #ifndef ELF_MACHINE_UNAME
37 #define ELF_MACHINE_UNAME "Unknown"
38 #endif
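/*
 * cpu_to_dump16/32/64 convert a value from host byte order to the byte
 * order recorded for this dump (s->dump_info.d_endian), so the core file
 * matches the guest's endianness.
 */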
40 uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
42 if (s->dump_info.d_endian == ELFDATA2LSB) {
43 val = cpu_to_le16(val);
44 } else {
45 val = cpu_to_be16(val);
48 return val;
51 uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
53 if (s->dump_info.d_endian == ELFDATA2LSB) {
54 val = cpu_to_le32(val);
55 } else {
56 val = cpu_to_be32(val);
59 return val;
62 uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
64 if (s->dump_info.d_endian == ELFDATA2LSB) {
65 val = cpu_to_le64(val);
66 } else {
67 val = cpu_to_be64(val);
70 return val;
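/*
 * Release everything held by a dump: the guest-phys block list, the memory
 * mapping list and the output fd.  If the VM was stopped for the dump, it
 * is resumed here (taking the iothread lock when running detached).
 */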
73 static int dump_cleanup(DumpState *s)
75 guest_phys_blocks_free(&s->guest_phys_blocks);
76 memory_mapping_list_free(&s->list);
77 close(s->fd);
78 if (s->resume) {
79 if (s->detached) {
80 qemu_mutex_lock_iothread();
82 vm_start();
83 if (s->detached) {
84 qemu_mutex_unlock_iothread();
88 return 0;
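/*
 * WriteCoreDumpFunction used for plain ELF dumps: write a buffer to the
 * dump file descriptor, returning -1 on a short write.
 */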
91 static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
93 DumpState *s = opaque;
94 size_t written_size;
96 written_size = qemu_write_full(s->fd, buf, size);
97 if (written_size != size) {
98 return -1;
101 return 0;
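/*
 * Write the ELF file header of the vmcore.  When the program header count
 * does not fit into e_phnum, a single section header (written later)
 * carries the real count in its sh_info field.
 */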
104 static void write_elf64_header(DumpState *s, Error **errp)
106 Elf64_Ehdr elf_header;
107 int ret;
109 memset(&elf_header, 0, sizeof(Elf64_Ehdr));
110 memcpy(&elf_header, ELFMAG, SELFMAG);
111 elf_header.e_ident[EI_CLASS] = ELFCLASS64;
112 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
113 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
114 elf_header.e_type = cpu_to_dump16(s, ET_CORE);
115 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
116 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
117 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
118 elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
119 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
120 elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
121 if (s->have_section) {
122 uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;
124 elf_header.e_shoff = cpu_to_dump64(s, shoff);
125 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
126 elf_header.e_shnum = cpu_to_dump16(s, 1);
129 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
130 if (ret < 0) {
131 error_setg(errp, "dump: failed to write elf header");
135 static void write_elf32_header(DumpState *s, Error **errp)
137 Elf32_Ehdr elf_header;
138 int ret;
140 memset(&elf_header, 0, sizeof(Elf32_Ehdr));
141 memcpy(&elf_header, ELFMAG, SELFMAG);
142 elf_header.e_ident[EI_CLASS] = ELFCLASS32;
143 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
144 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
145 elf_header.e_type = cpu_to_dump16(s, ET_CORE);
146 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
147 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
148 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
149 elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
150 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
151 elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
152 if (s->have_section) {
153 uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;
155 elf_header.e_shoff = cpu_to_dump32(s, shoff);
156 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
157 elf_header.e_shnum = cpu_to_dump16(s, 1);
160 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
161 if (ret < 0) {
162 error_setg(errp, "dump: failed to write elf header");
166 static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
167 int phdr_index, hwaddr offset,
168 hwaddr filesz, Error **errp)
170 Elf64_Phdr phdr;
171 int ret;
173 memset(&phdr, 0, sizeof(Elf64_Phdr));
174 phdr.p_type = cpu_to_dump32(s, PT_LOAD);
175 phdr.p_offset = cpu_to_dump64(s, offset);
176 phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
177 phdr.p_filesz = cpu_to_dump64(s, filesz);
178 phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
179 phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);
181 assert(memory_mapping->length >= filesz);
183 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
184 if (ret < 0) {
185 error_setg(errp, "dump: failed to write program header table");
189 static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
190 int phdr_index, hwaddr offset,
191 hwaddr filesz, Error **errp)
193 Elf32_Phdr phdr;
194 int ret;
196 memset(&phdr, 0, sizeof(Elf32_Phdr));
197 phdr.p_type = cpu_to_dump32(s, PT_LOAD);
198 phdr.p_offset = cpu_to_dump32(s, offset);
199 phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
200 phdr.p_filesz = cpu_to_dump32(s, filesz);
201 phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
202 phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);
204 assert(memory_mapping->length >= filesz);
206 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
207 if (ret < 0) {
208 error_setg(errp, "dump: failed to write program header table");
212 static void write_elf64_note(DumpState *s, Error **errp)
214 Elf64_Phdr phdr;
215 hwaddr begin = s->memory_offset - s->note_size;
216 int ret;
218 memset(&phdr, 0, sizeof(Elf64_Phdr));
219 phdr.p_type = cpu_to_dump32(s, PT_NOTE);
220 phdr.p_offset = cpu_to_dump64(s, begin);
221 phdr.p_paddr = 0;
222 phdr.p_filesz = cpu_to_dump64(s, s->note_size);
223 phdr.p_memsz = cpu_to_dump64(s, s->note_size);
224 phdr.p_vaddr = 0;
226 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
227 if (ret < 0) {
228 error_setg(errp, "dump: failed to write program header table");
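/* The CPU index used in ELF notes is 1-based. */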
232 static inline int cpu_index(CPUState *cpu)
234 return cpu->cpu_index + 1;
237 static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
238 Error **errp)
240 CPUState *cpu;
241 int ret;
242 int id;
244 CPU_FOREACH(cpu) {
245 id = cpu_index(cpu);
246 ret = cpu_write_elf64_note(f, cpu, id, s);
247 if (ret < 0) {
248 error_setg(errp, "dump: failed to write elf notes");
249 return;
253 CPU_FOREACH(cpu) {
254 ret = cpu_write_elf64_qemunote(f, cpu, s);
255 if (ret < 0) {
256 error_setg(errp, "dump: failed to write CPU status");
257 return;
262 static void write_elf32_note(DumpState *s, Error **errp)
264 hwaddr begin = s->memory_offset - s->note_size;
265 Elf32_Phdr phdr;
266 int ret;
268 memset(&phdr, 0, sizeof(Elf32_Phdr));
269 phdr.p_type = cpu_to_dump32(s, PT_NOTE);
270 phdr.p_offset = cpu_to_dump32(s, begin);
271 phdr.p_paddr = 0;
272 phdr.p_filesz = cpu_to_dump32(s, s->note_size);
273 phdr.p_memsz = cpu_to_dump32(s, s->note_size);
274 phdr.p_vaddr = 0;
276 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
277 if (ret < 0) {
278 error_setg(errp, "dump: failed to write program header table");
282 static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
283 Error **errp)
285 CPUState *cpu;
286 int ret;
287 int id;
289 CPU_FOREACH(cpu) {
290 id = cpu_index(cpu);
291 ret = cpu_write_elf32_note(f, cpu, id, s);
292 if (ret < 0) {
293 error_setg(errp, "dump: failed to write elf notes");
294 return;
298 CPU_FOREACH(cpu) {
299 ret = cpu_write_elf32_qemunote(f, cpu, s);
300 if (ret < 0) {
301 error_setg(errp, "dump: failed to write CPU status");
302 return;
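/*
 * Write the single section header that is emitted when the program header
 * count overflows e_phnum.  'type' selects ELFCLASS32 (0) or ELFCLASS64
 * (non-zero); sh_info holds the real number of program headers.
 */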
307 static void write_elf_section(DumpState *s, int type, Error **errp)
309 Elf32_Shdr shdr32;
310 Elf64_Shdr shdr64;
311 int shdr_size;
312 void *shdr;
313 int ret;
315 if (type == 0) {
316 shdr_size = sizeof(Elf32_Shdr);
317 memset(&shdr32, 0, shdr_size);
318 shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
319 shdr = &shdr32;
320 } else {
321 shdr_size = sizeof(Elf64_Shdr);
322 memset(&shdr64, 0, shdr_size);
323 shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
324 shdr = &shdr64;
 327     ret = fd_write_vmcore(shdr, shdr_size, s);
328 if (ret < 0) {
329 error_setg(errp, "dump: failed to write section header table");
333 static void write_data(DumpState *s, void *buf, int length, Error **errp)
335 int ret;
337 ret = fd_write_vmcore(buf, length, s);
338 if (ret < 0) {
339 error_setg(errp, "dump: failed to save memory");
340 } else {
341 s->written_size += length;
345 /* write the memory to vmcore. 1 page per I/O. */
346 static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
347 int64_t size, Error **errp)
349 int64_t i;
350 Error *local_err = NULL;
352 for (i = 0; i < size / s->dump_info.page_size; i++) {
353 write_data(s, block->host_addr + start + i * s->dump_info.page_size,
354 s->dump_info.page_size, &local_err);
355 if (local_err) {
356 error_propagate(errp, local_err);
357 return;
361 if ((size % s->dump_info.page_size) != 0) {
362 write_data(s, block->host_addr + start + i * s->dump_info.page_size,
363 size % s->dump_info.page_size, &local_err);
364 if (local_err) {
365 error_propagate(errp, local_err);
366 return;
371 /* get the memory's offset and size in the vmcore */
372 static void get_offset_range(hwaddr phys_addr,
373 ram_addr_t mapping_length,
374 DumpState *s,
375 hwaddr *p_offset,
376 hwaddr *p_filesz)
378 GuestPhysBlock *block;
379 hwaddr offset = s->memory_offset;
380 int64_t size_in_block, start;
382 /* When the memory is not stored into vmcore, offset will be -1 */
383 *p_offset = -1;
384 *p_filesz = 0;
386 if (s->has_filter) {
387 if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
388 return;
392 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
393 if (s->has_filter) {
394 if (block->target_start >= s->begin + s->length ||
395 block->target_end <= s->begin) {
396 /* This block is out of the range */
397 continue;
400 if (s->begin <= block->target_start) {
401 start = block->target_start;
402 } else {
403 start = s->begin;
406 size_in_block = block->target_end - start;
407 if (s->begin + s->length < block->target_end) {
408 size_in_block -= block->target_end - (s->begin + s->length);
410 } else {
411 start = block->target_start;
412 size_in_block = block->target_end - block->target_start;
415 if (phys_addr >= start && phys_addr < start + size_in_block) {
416 *p_offset = phys_addr - start + offset;
418 /* The offset range mapped from the vmcore file must not spill over
419 * the GuestPhysBlock, clamp it. The rest of the mapping will be
420 * zero-filled in memory at load time; see
421 * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
423 *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
424 mapping_length :
425 size_in_block - (phys_addr - start);
426 return;
429 offset += size_in_block;
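/*
 * Write one PT_LOAD program header per memory mapping.  The file offset
 * and file size come from get_offset_range(), so mappings outside the
 * filter or spilling past their GuestPhysBlock are clamped accordingly.
 */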
433 static void write_elf_loads(DumpState *s, Error **errp)
435 hwaddr offset, filesz;
436 MemoryMapping *memory_mapping;
437 uint32_t phdr_index = 1;
438 uint32_t max_index;
439 Error *local_err = NULL;
441 if (s->have_section) {
442 max_index = s->sh_info;
443 } else {
444 max_index = s->phdr_num;
447 QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
448 get_offset_range(memory_mapping->phys_addr,
449 memory_mapping->length,
450 s, &offset, &filesz);
451 if (s->dump_info.d_class == ELFCLASS64) {
452 write_elf64_load(s, memory_mapping, phdr_index++, offset,
453 filesz, &local_err);
454 } else {
455 write_elf32_load(s, memory_mapping, phdr_index++, offset,
456 filesz, &local_err);
459 if (local_err) {
460 error_propagate(errp, local_err);
461 return;
464 if (phdr_index >= max_index) {
465 break;
470 /* write elf header, PT_NOTE and elf note to vmcore. */
471 static void dump_begin(DumpState *s, Error **errp)
473 Error *local_err = NULL;
476 * the vmcore's format is:
477 * --------------
478 * | elf header |
479 * --------------
480 * | PT_NOTE |
481 * --------------
482 * | PT_LOAD |
483 * --------------
484 * | ...... |
485 * --------------
486 * | PT_LOAD |
487 * --------------
488 * | sec_hdr |
489 * --------------
490 * | elf note |
491 * --------------
492 * | memory |
493 * --------------
495 * we only know where the memory is saved after we write elf note into
496 * vmcore.
499 /* write elf header to vmcore */
500 if (s->dump_info.d_class == ELFCLASS64) {
501 write_elf64_header(s, &local_err);
502 } else {
503 write_elf32_header(s, &local_err);
505 if (local_err) {
506 error_propagate(errp, local_err);
507 return;
510 if (s->dump_info.d_class == ELFCLASS64) {
511 /* write PT_NOTE to vmcore */
512 write_elf64_note(s, &local_err);
513 if (local_err) {
514 error_propagate(errp, local_err);
515 return;
518 /* write all PT_LOAD to vmcore */
519 write_elf_loads(s, &local_err);
520 if (local_err) {
521 error_propagate(errp, local_err);
522 return;
525 /* write section to vmcore */
526 if (s->have_section) {
527 write_elf_section(s, 1, &local_err);
528 if (local_err) {
529 error_propagate(errp, local_err);
530 return;
534 /* write notes to vmcore */
535 write_elf64_notes(fd_write_vmcore, s, &local_err);
536 if (local_err) {
537 error_propagate(errp, local_err);
538 return;
540 } else {
541 /* write PT_NOTE to vmcore */
542 write_elf32_note(s, &local_err);
543 if (local_err) {
544 error_propagate(errp, local_err);
545 return;
548 /* write all PT_LOAD to vmcore */
549 write_elf_loads(s, &local_err);
550 if (local_err) {
551 error_propagate(errp, local_err);
552 return;
555 /* write section to vmcore */
556 if (s->have_section) {
557 write_elf_section(s, 0, &local_err);
558 if (local_err) {
559 error_propagate(errp, local_err);
560 return;
564 /* write notes to vmcore */
565 write_elf32_notes(fd_write_vmcore, s, &local_err);
566 if (local_err) {
567 error_propagate(errp, local_err);
568 return;
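/*
 * Advance to the next guest-phys block that intersects the dump filter,
 * updating s->start and s->next_block.  Returns 1 when no block is left.
 */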
573 static int get_next_block(DumpState *s, GuestPhysBlock *block)
575 while (1) {
576 block = QTAILQ_NEXT(block, next);
577 if (!block) {
578 /* no more block */
579 return 1;
582 s->start = 0;
583 s->next_block = block;
584 if (s->has_filter) {
585 if (block->target_start >= s->begin + s->length ||
586 block->target_end <= s->begin) {
587 /* This block is out of the range */
588 continue;
591 if (s->begin > block->target_start) {
592 s->start = s->begin - block->target_start;
596 return 0;
600 /* write all memory to vmcore */
601 static void dump_iterate(DumpState *s, Error **errp)
603 GuestPhysBlock *block;
604 int64_t size;
605 Error *local_err = NULL;
607 do {
608 block = s->next_block;
610 size = block->target_end - block->target_start;
611 if (s->has_filter) {
612 size -= s->start;
613 if (s->begin + s->length < block->target_end) {
614 size -= block->target_end - (s->begin + s->length);
617 write_memory(s, block, s->start, size, &local_err);
618 if (local_err) {
619 error_propagate(errp, local_err);
620 return;
623 } while (!get_next_block(s, block));
626 static void create_vmcore(DumpState *s, Error **errp)
628 Error *local_err = NULL;
630 dump_begin(s, &local_err);
631 if (local_err) {
632 error_propagate(errp, local_err);
633 return;
636 dump_iterate(s, errp);
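/*
 * The kdump "flat" format wraps every chunk in a MakedumpfileDataHeader
 * giving its target offset and size, so the image can be produced
 * sequentially (e.g. through a pipe) and rearranged into a regular dump
 * by makedumpfile afterwards.
 */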
639 static int write_start_flat_header(int fd)
641 MakedumpfileHeader *mh;
642 int ret = 0;
644 QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
645 mh = g_malloc0(MAX_SIZE_MDF_HEADER);
647 memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
648 MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));
650 mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
651 mh->version = cpu_to_be64(VERSION_FLAT_HEADER);
653 size_t written_size;
654 written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
655 if (written_size != MAX_SIZE_MDF_HEADER) {
656 ret = -1;
659 g_free(mh);
660 return ret;
663 static int write_end_flat_header(int fd)
665 MakedumpfileDataHeader mdh;
667 mdh.offset = END_FLAG_FLAT_HEADER;
668 mdh.buf_size = END_FLAG_FLAT_HEADER;
670 size_t written_size;
671 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
672 if (written_size != sizeof(mdh)) {
673 return -1;
676 return 0;
679 static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
681 size_t written_size;
682 MakedumpfileDataHeader mdh;
684 mdh.offset = cpu_to_be64(offset);
685 mdh.buf_size = cpu_to_be64(size);
687 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
688 if (written_size != sizeof(mdh)) {
689 return -1;
692 written_size = qemu_write_full(fd, buf, size);
693 if (written_size != size) {
694 return -1;
697 return 0;
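/*
 * WriteCoreDumpFunction used for kdump dumps: accumulate the ELF notes in
 * s->note_buf instead of writing them straight to the file.
 */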
700 static int buf_write_note(const void *buf, size_t size, void *opaque)
702 DumpState *s = opaque;
 704     /* note_buf is not big enough */
705 if (s->note_buf_offset + size > s->note_size) {
706 return -1;
709 memcpy(s->note_buf + s->note_buf_offset, buf, size);
711 s->note_buf_offset += size;
713 return 0;
716 /* write common header, sub header and elf note to vmcore */
717 static void create_header32(DumpState *s, Error **errp)
719 DiskDumpHeader32 *dh = NULL;
720 KdumpSubHeader32 *kh = NULL;
721 size_t size;
722 uint32_t block_size;
723 uint32_t sub_hdr_size;
724 uint32_t bitmap_blocks;
725 uint32_t status = 0;
726 uint64_t offset_note;
727 Error *local_err = NULL;
 729     /* write the common header; the kdump-compressed format version is 6 */
730 size = sizeof(DiskDumpHeader32);
731 dh = g_malloc0(size);
733 strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
734 dh->header_version = cpu_to_dump32(s, 6);
735 block_size = s->dump_info.page_size;
736 dh->block_size = cpu_to_dump32(s, block_size);
737 sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
738 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
739 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
740 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
741 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
742 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
743 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
744 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
745 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
747 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
748 status |= DUMP_DH_COMPRESSED_ZLIB;
750 #ifdef CONFIG_LZO
751 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
752 status |= DUMP_DH_COMPRESSED_LZO;
754 #endif
755 #ifdef CONFIG_SNAPPY
756 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
757 status |= DUMP_DH_COMPRESSED_SNAPPY;
759 #endif
760 dh->status = cpu_to_dump32(s, status);
762 if (write_buffer(s->fd, 0, dh, size) < 0) {
763 error_setg(errp, "dump: failed to write disk dump header");
764 goto out;
767 /* write sub header */
768 size = sizeof(KdumpSubHeader32);
769 kh = g_malloc0(size);
771 /* 64bit max_mapnr_64 */
772 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
773 kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
774 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
776 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
777 kh->offset_note = cpu_to_dump64(s, offset_note);
778 kh->note_size = cpu_to_dump32(s, s->note_size);
780 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
781 block_size, kh, size) < 0) {
782 error_setg(errp, "dump: failed to write kdump sub header");
783 goto out;
786 /* write note */
787 s->note_buf = g_malloc0(s->note_size);
788 s->note_buf_offset = 0;
790 /* use s->note_buf to store notes temporarily */
791 write_elf32_notes(buf_write_note, s, &local_err);
792 if (local_err) {
793 error_propagate(errp, local_err);
794 goto out;
796 if (write_buffer(s->fd, offset_note, s->note_buf,
797 s->note_size) < 0) {
798 error_setg(errp, "dump: failed to write notes");
799 goto out;
802 /* get offset of dump_bitmap */
803 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
804 block_size;
806 /* get offset of page */
807 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
808 block_size;
810 out:
811 g_free(dh);
812 g_free(kh);
813 g_free(s->note_buf);
816 /* write common header, sub header and elf note to vmcore */
817 static void create_header64(DumpState *s, Error **errp)
819 DiskDumpHeader64 *dh = NULL;
820 KdumpSubHeader64 *kh = NULL;
821 size_t size;
822 uint32_t block_size;
823 uint32_t sub_hdr_size;
824 uint32_t bitmap_blocks;
825 uint32_t status = 0;
826 uint64_t offset_note;
827 Error *local_err = NULL;
 829     /* write the common header; the kdump-compressed format version is 6 */
830 size = sizeof(DiskDumpHeader64);
831 dh = g_malloc0(size);
833 strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
834 dh->header_version = cpu_to_dump32(s, 6);
835 block_size = s->dump_info.page_size;
836 dh->block_size = cpu_to_dump32(s, block_size);
837 sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
838 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
839 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
840 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
841 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
842 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
843 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
844 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
845 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
847 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
848 status |= DUMP_DH_COMPRESSED_ZLIB;
850 #ifdef CONFIG_LZO
851 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
852 status |= DUMP_DH_COMPRESSED_LZO;
854 #endif
855 #ifdef CONFIG_SNAPPY
856 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
857 status |= DUMP_DH_COMPRESSED_SNAPPY;
859 #endif
860 dh->status = cpu_to_dump32(s, status);
862 if (write_buffer(s->fd, 0, dh, size) < 0) {
863 error_setg(errp, "dump: failed to write disk dump header");
864 goto out;
867 /* write sub header */
868 size = sizeof(KdumpSubHeader64);
869 kh = g_malloc0(size);
871 /* 64bit max_mapnr_64 */
872 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
873 kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
874 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
876 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
877 kh->offset_note = cpu_to_dump64(s, offset_note);
878 kh->note_size = cpu_to_dump64(s, s->note_size);
880 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
881 block_size, kh, size) < 0) {
882 error_setg(errp, "dump: failed to write kdump sub header");
883 goto out;
886 /* write note */
887 s->note_buf = g_malloc0(s->note_size);
888 s->note_buf_offset = 0;
890 /* use s->note_buf to store notes temporarily */
891 write_elf64_notes(buf_write_note, s, &local_err);
892 if (local_err) {
893 error_propagate(errp, local_err);
894 goto out;
897 if (write_buffer(s->fd, offset_note, s->note_buf,
898 s->note_size) < 0) {
899 error_setg(errp, "dump: failed to write notes");
900 goto out;
903 /* get offset of dump_bitmap */
904 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
905 block_size;
907 /* get offset of page */
908 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
909 block_size;
911 out:
912 g_free(dh);
913 g_free(kh);
914 g_free(s->note_buf);
917 static void write_dump_header(DumpState *s, Error **errp)
919 Error *local_err = NULL;
921 if (s->dump_info.d_class == ELFCLASS32) {
922 create_header32(s, &local_err);
923 } else {
924 create_header64(s, &local_err);
926 error_propagate(errp, local_err);
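/* The dump bitmap is cached and flushed to the file in page-sized chunks. */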
929 static size_t dump_bitmap_get_bufsize(DumpState *s)
931 return s->dump_info.page_size;
 935  * set dump_bitmap sequentially. Bits before last_pfn must not be rewritten,
 936  * so to set the first bit, pass last_pfn and pfn as 0.
 937  * set_dump_bitmap always leaves the most recently set bit un-synced; setting
 938  * (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into the
 939  * vmcore, i.e. it synchronizes the un-synced bit into the vmcore.
941 static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
942 uint8_t *buf, DumpState *s)
944 off_t old_offset, new_offset;
945 off_t offset_bitmap1, offset_bitmap2;
946 uint32_t byte, bit;
947 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
948 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;
950 /* should not set the previous place */
951 assert(last_pfn <= pfn);
 954      * if the bit that needs to be set is not cached in buf, first flush the
 955      * data in buf to the vmcore.
 956      * making new_offset bigger than old_offset also syncs any remaining data
 957      * into the vmcore.
959 old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
960 new_offset = bitmap_bufsize * (pfn / bits_per_buf);
962 while (old_offset < new_offset) {
963 /* calculate the offset and write dump_bitmap */
964 offset_bitmap1 = s->offset_dump_bitmap + old_offset;
965 if (write_buffer(s->fd, offset_bitmap1, buf,
966 bitmap_bufsize) < 0) {
967 return -1;
 970         /* dump level 1 is chosen, so the 1st and 2nd bitmaps are the same */
971 offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
972 old_offset;
973 if (write_buffer(s->fd, offset_bitmap2, buf,
974 bitmap_bufsize) < 0) {
975 return -1;
978 memset(buf, 0, bitmap_bufsize);
979 old_offset += bitmap_bufsize;
982 /* get the exact place of the bit in the buf, and set it */
983 byte = (pfn % bits_per_buf) / CHAR_BIT;
984 bit = (pfn % bits_per_buf) % CHAR_BIT;
985 if (value) {
986 buf[byte] |= 1u << bit;
987 } else {
988 buf[byte] &= ~(1u << bit);
991 return 0;
994 static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
996 int target_page_shift = ctz32(s->dump_info.page_size);
998 return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
1001 static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
1003 int target_page_shift = ctz32(s->dump_info.page_size);
1005 return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
 1009  * examine every page and return the page frame number and the address of the
 1010  * page. bufptr can be NULL. note: the blocks here are supposed to reflect
 1011  * guest-phys blocks, so block->target_start and block->target_end should be
 1012  * integral multiples of the target page size.
1014 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
1015 uint8_t **bufptr, DumpState *s)
1017 GuestPhysBlock *block = *blockptr;
1018 hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
1019 uint8_t *buf;
1021 /* block == NULL means the start of the iteration */
1022 if (!block) {
1023 block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1024 *blockptr = block;
1025 assert((block->target_start & ~target_page_mask) == 0);
1026 assert((block->target_end & ~target_page_mask) == 0);
1027 *pfnptr = dump_paddr_to_pfn(s, block->target_start);
1028 if (bufptr) {
1029 *bufptr = block->host_addr;
1031 return true;
1034 *pfnptr = *pfnptr + 1;
1035 addr = dump_pfn_to_paddr(s, *pfnptr);
1037 if ((addr >= block->target_start) &&
1038 (addr + s->dump_info.page_size <= block->target_end)) {
1039 buf = block->host_addr + (addr - block->target_start);
1040 } else {
1041 /* the next page is in the next block */
1042 block = QTAILQ_NEXT(block, next);
1043 *blockptr = block;
1044 if (!block) {
1045 return false;
1047 assert((block->target_start & ~target_page_mask) == 0);
1048 assert((block->target_end & ~target_page_mask) == 0);
1049 *pfnptr = dump_paddr_to_pfn(s, block->target_start);
1050 buf = block->host_addr;
1053 if (bufptr) {
1054 *bufptr = buf;
1057 return true;
1060 static void write_dump_bitmap(DumpState *s, Error **errp)
1062 int ret = 0;
1063 uint64_t last_pfn, pfn;
1064 void *dump_bitmap_buf;
1065 size_t num_dumpable;
1066 GuestPhysBlock *block_iter = NULL;
1067 size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
1068 size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;
1070 /* dump_bitmap_buf is used to store dump_bitmap temporarily */
1071 dump_bitmap_buf = g_malloc0(bitmap_bufsize);
1073 num_dumpable = 0;
1074 last_pfn = 0;
 1077      * examine memory page by page, and set the bit in dump_bitmap that
 1078      * corresponds to each existing page.
1080 while (get_next_page(&block_iter, &pfn, NULL, s)) {
1081 ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
1082 if (ret < 0) {
1083 error_setg(errp, "dump: failed to set dump_bitmap");
1084 goto out;
1087 last_pfn = pfn;
1088 num_dumpable++;
 1092      * set_dump_bitmap always leaves the most recently set bit un-synced. Here we
 1093      * set the remaining bits from last_pfn to the end of the bitmap buffer to
 1094      * 0. With those set, the un-synced bit is synchronized into the vmcore.
1096 if (num_dumpable > 0) {
1097 ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
1098 dump_bitmap_buf, s);
1099 if (ret < 0) {
1100 error_setg(errp, "dump: failed to sync dump_bitmap");
1101 goto out;
1105 /* number of dumpable pages that will be dumped later */
1106 s->num_dumpable = num_dumpable;
1108 out:
1109 g_free(dump_bitmap_buf);
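/*
 * A DataCache batches page descriptors or page data in a buffer (four
 * bitmap-buffer sizes) before writing them out at the recorded file offset.
 */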
1112 static void prepare_data_cache(DataCache *data_cache, DumpState *s,
1113 off_t offset)
1115 data_cache->fd = s->fd;
1116 data_cache->data_size = 0;
1117 data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
1118 data_cache->buf = g_malloc0(data_cache->buf_size);
1119 data_cache->offset = offset;
1122 static int write_cache(DataCache *dc, const void *buf, size_t size,
1123 bool flag_sync)
 1126      * dc->buf_size must not be less than size, otherwise dc will never have
 1127      * enough room
1129 assert(size <= dc->buf_size);
 1132      * if flag_sync is set, synchronize the data in dc->buf into the vmcore.
 1133      * otherwise check whether there is enough space to cache the data in buf;
 1134      * if not, write the data in dc->buf to dc->fd and reset dc->buf
1136 if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
1137 (flag_sync && dc->data_size > 0)) {
1138 if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
1139 return -1;
1142 dc->offset += dc->data_size;
1143 dc->data_size = 0;
1146 if (!flag_sync) {
1147 memcpy(dc->buf + dc->data_size, buf, size);
1148 dc->data_size += size;
1151 return 0;
1154 static void free_data_cache(DataCache *data_cache)
1156 g_free(data_cache->buf);
1159 static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
1161 switch (flag_compress) {
1162 case DUMP_DH_COMPRESSED_ZLIB:
1163 return compressBound(page_size);
1165 case DUMP_DH_COMPRESSED_LZO:
1167 * LZO will expand incompressible data by a little amount. Please check
1168 * the following URL to see the expansion calculation:
1169 * http://www.oberhumer.com/opensource/lzo/lzofaq.php
1171 return page_size + page_size / 16 + 64 + 3;
1173 #ifdef CONFIG_SNAPPY
1174 case DUMP_DH_COMPRESSED_SNAPPY:
1175 return snappy_max_compressed_length(page_size);
1176 #endif
1178 return 0;
1182 * check if the page is all 0
1184 static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
1186 return buffer_is_zero(buf, page_size);
1189 static void write_dump_pages(DumpState *s, Error **errp)
1191 int ret = 0;
1192 DataCache page_desc, page_data;
1193 size_t len_buf_out, size_out;
1194 #ifdef CONFIG_LZO
1195 lzo_bytep wrkmem = NULL;
1196 #endif
1197 uint8_t *buf_out = NULL;
1198 off_t offset_desc, offset_data;
1199 PageDescriptor pd, pd_zero;
1200 uint8_t *buf;
1201 GuestPhysBlock *block_iter = NULL;
1202 uint64_t pfn_iter;
1204 /* get offset of page_desc and page_data in dump file */
1205 offset_desc = s->offset_page;
1206 offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;
1208 prepare_data_cache(&page_desc, s, offset_desc);
1209 prepare_data_cache(&page_data, s, offset_data);
1211 /* prepare buffer to store compressed data */
1212 len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
1213 assert(len_buf_out != 0);
1215 #ifdef CONFIG_LZO
1216 wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
1217 #endif
1219 buf_out = g_malloc(len_buf_out);
1222 * init zero page's page_desc and page_data, because every zero page
1223 * uses the same page_data
1225 pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
1226 pd_zero.flags = cpu_to_dump32(s, 0);
1227 pd_zero.offset = cpu_to_dump64(s, offset_data);
1228 pd_zero.page_flags = cpu_to_dump64(s, 0);
1229 buf = g_malloc0(s->dump_info.page_size);
1230 ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
1231 g_free(buf);
1232 if (ret < 0) {
1233 error_setg(errp, "dump: failed to write page data (zero page)");
1234 goto out;
1237 offset_data += s->dump_info.page_size;
 1240      * dump memory to the vmcore page by page. all zero pages are represented
 1241      * by the single zero page stored at the start of the page section
1243 while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
1244 /* check zero page */
1245 if (is_zero_page(buf, s->dump_info.page_size)) {
1246 ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
1247 false);
1248 if (ret < 0) {
1249 error_setg(errp, "dump: failed to write page desc");
1250 goto out;
1252 } else {
1254 * not zero page, then:
1255 * 1. compress the page
1256 * 2. write the compressed page into the cache of page_data
1257 * 3. get page desc of the compressed page and write it into the
1258 * cache of page_desc
 1260              * only one compression format is used here, selected by
 1261              * s->flag_compress. But when compression fails,
 1262              * we fall back to saving the page in plaintext.
1264 size_out = len_buf_out;
1265 if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
1266 (compress2(buf_out, (uLongf *)&size_out, buf,
1267 s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
1268 (size_out < s->dump_info.page_size)) {
1269 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
1270 pd.size = cpu_to_dump32(s, size_out);
1272 ret = write_cache(&page_data, buf_out, size_out, false);
1273 if (ret < 0) {
1274 error_setg(errp, "dump: failed to write page data");
1275 goto out;
1277 #ifdef CONFIG_LZO
1278 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
1279 (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
1280 (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
1281 (size_out < s->dump_info.page_size)) {
1282 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
1283 pd.size = cpu_to_dump32(s, size_out);
1285 ret = write_cache(&page_data, buf_out, size_out, false);
1286 if (ret < 0) {
1287 error_setg(errp, "dump: failed to write page data");
1288 goto out;
1290 #endif
1291 #ifdef CONFIG_SNAPPY
1292 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
1293 (snappy_compress((char *)buf, s->dump_info.page_size,
1294 (char *)buf_out, &size_out) == SNAPPY_OK) &&
1295 (size_out < s->dump_info.page_size)) {
1296 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
1297 pd.size = cpu_to_dump32(s, size_out);
1299 ret = write_cache(&page_data, buf_out, size_out, false);
1300 if (ret < 0) {
1301 error_setg(errp, "dump: failed to write page data");
1302 goto out;
1304 #endif
1305 } else {
 1307                  * fall back to saving in plaintext; size_out is set to
 1308                  * the target's page size
1310 pd.flags = cpu_to_dump32(s, 0);
1311 size_out = s->dump_info.page_size;
1312 pd.size = cpu_to_dump32(s, size_out);
1314 ret = write_cache(&page_data, buf,
1315 s->dump_info.page_size, false);
1316 if (ret < 0) {
1317 error_setg(errp, "dump: failed to write page data");
1318 goto out;
1322 /* get and write page desc here */
1323 pd.page_flags = cpu_to_dump64(s, 0);
1324 pd.offset = cpu_to_dump64(s, offset_data);
1325 offset_data += size_out;
1327 ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
1328 if (ret < 0) {
1329 error_setg(errp, "dump: failed to write page desc");
1330 goto out;
1333 s->written_size += s->dump_info.page_size;
1336 ret = write_cache(&page_desc, NULL, 0, true);
1337 if (ret < 0) {
1338 error_setg(errp, "dump: failed to sync cache for page_desc");
1339 goto out;
1341 ret = write_cache(&page_data, NULL, 0, true);
1342 if (ret < 0) {
1343 error_setg(errp, "dump: failed to sync cache for page_data");
1344 goto out;
1347 out:
1348 free_data_cache(&page_desc);
1349 free_data_cache(&page_data);
1351 #ifdef CONFIG_LZO
1352 g_free(wrkmem);
1353 #endif
1355 g_free(buf_out);
1358 static void create_kdump_vmcore(DumpState *s, Error **errp)
1360 int ret;
1361 Error *local_err = NULL;
1364 * the kdump-compressed format is:
1365 * File offset
1366 * +------------------------------------------+ 0x0
1367 * | main header (struct disk_dump_header) |
1368 * |------------------------------------------+ block 1
1369 * | sub header (struct kdump_sub_header) |
1370 * |------------------------------------------+ block 2
1371 * | 1st-dump_bitmap |
1372 * |------------------------------------------+ block 2 + X blocks
1373 * | 2nd-dump_bitmap | (aligned by block)
1374 * |------------------------------------------+ block 2 + 2 * X blocks
1375 * | page desc for pfn 0 (struct page_desc) | (aligned by block)
1376 * | page desc for pfn 1 (struct page_desc) |
1377 * | : |
1378 * |------------------------------------------| (not aligned by block)
1379 * | page data (pfn 0) |
1380 * | page data (pfn 1) |
1381 * | : |
1382 * +------------------------------------------+
1385 ret = write_start_flat_header(s->fd);
1386 if (ret < 0) {
1387 error_setg(errp, "dump: failed to write start flat header");
1388 return;
1391 write_dump_header(s, &local_err);
1392 if (local_err) {
1393 error_propagate(errp, local_err);
1394 return;
1397 write_dump_bitmap(s, &local_err);
1398 if (local_err) {
1399 error_propagate(errp, local_err);
1400 return;
1403 write_dump_pages(s, &local_err);
1404 if (local_err) {
1405 error_propagate(errp, local_err);
1406 return;
1409 ret = write_end_flat_header(s->fd);
1410 if (ret < 0) {
1411 error_setg(errp, "dump: failed to write end flat header");
1412 return;
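/*
 * Find the first guest-phys block that intersects the dump filter and set
 * s->next_block/s->start accordingly; returns -1 if nothing matches.
 */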
1416 static ram_addr_t get_start_block(DumpState *s)
1418 GuestPhysBlock *block;
1420 if (!s->has_filter) {
1421 s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1422 return 0;
1425 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1426 if (block->target_start >= s->begin + s->length ||
1427 block->target_end <= s->begin) {
1428 /* This block is out of the range */
1429 continue;
1432 s->next_block = block;
1433 if (s->begin > block->target_start) {
1434 s->start = s->begin - block->target_start;
1435 } else {
1436 s->start = 0;
1438 return s->start;
1441 return -1;
1444 static void get_max_mapnr(DumpState *s)
1446 GuestPhysBlock *last_block;
1448 last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
1449 s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
1452 static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };
1454 static void dump_state_prepare(DumpState *s)
1456 /* zero the struct, setting status to active */
1457 *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
1460 bool dump_in_progress(void)
1462 DumpState *state = &dump_state_global;
1463 return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE);
 1466 /* calculate the total size of memory to be dumped (taking the filter into
 1467  * account) */
1468 static int64_t dump_calculate_size(DumpState *s)
1470 GuestPhysBlock *block;
1471 int64_t size = 0, total = 0, left = 0, right = 0;
1473 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1474 if (s->has_filter) {
1475 /* calculate the overlapped region. */
1476 left = MAX(s->begin, block->target_start);
1477 right = MIN(s->begin + s->length, block->target_end);
1478 size = right - left;
1479 size = size > 0 ? size : 0;
1480 } else {
1481 /* count the whole region in */
1482 size = (block->target_end - block->target_start);
1484 total += size;
1487 return total;
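/*
 * dump_init stops the VM if it is running, collects the guest-phys blocks
 * and memory mappings, queries the target's dump info (class, endianness,
 * page size) and precomputes the sizes and offsets used by both the ELF
 * and the kdump-compressed writers.
 */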
1490 static void dump_init(DumpState *s, int fd, bool has_format,
1491 DumpGuestMemoryFormat format, bool paging, bool has_filter,
1492 int64_t begin, int64_t length, Error **errp)
1494 CPUState *cpu;
1495 int nr_cpus;
1496 Error *err = NULL;
1497 int ret;
1499 s->has_format = has_format;
1500 s->format = format;
1501 s->written_size = 0;
 1503     /* the kdump-compressed format conflicts with paging and filtering */
1504 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1505 assert(!paging && !has_filter);
1508 if (runstate_is_running()) {
1509 vm_stop(RUN_STATE_SAVE_VM);
1510 s->resume = true;
1511 } else {
1512 s->resume = false;
1515 /* If we use KVM, we should synchronize the registers before we get dump
1516 * info or physmap info.
1518 cpu_synchronize_all_states();
1519 nr_cpus = 0;
1520 CPU_FOREACH(cpu) {
1521 nr_cpus++;
1524 s->fd = fd;
1525 s->has_filter = has_filter;
1526 s->begin = begin;
1527 s->length = length;
1529 memory_mapping_list_init(&s->list);
1531 guest_phys_blocks_init(&s->guest_phys_blocks);
1532 guest_phys_blocks_append(&s->guest_phys_blocks);
1533 s->total_size = dump_calculate_size(s);
1534 #ifdef DEBUG_DUMP_GUEST_MEMORY
1535 fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
1536 #endif
1538 /* it does not make sense to dump non-existent memory */
1539 if (!s->total_size) {
1540 error_setg(errp, "dump: no guest memory to dump");
1541 goto cleanup;
1544 s->start = get_start_block(s);
1545 if (s->start == -1) {
1546 error_setg(errp, QERR_INVALID_PARAMETER, "begin");
1547 goto cleanup;
1550 /* get dump info: endian, class and architecture.
1551 * If the target architecture is not supported, cpu_get_dump_info() will
1552 * return -1.
1554 ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
1555 if (ret < 0) {
1556 error_setg(errp, QERR_UNSUPPORTED);
1557 goto cleanup;
1560 if (!s->dump_info.page_size) {
1561 s->dump_info.page_size = TARGET_PAGE_SIZE;
1564 s->note_size = cpu_get_note_size(s->dump_info.d_class,
1565 s->dump_info.d_machine, nr_cpus);
1566 if (s->note_size < 0) {
1567 error_setg(errp, QERR_UNSUPPORTED);
1568 goto cleanup;
1571 /* get memory mapping */
1572 if (paging) {
1573 qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
1574 if (err != NULL) {
1575 error_propagate(errp, err);
1576 goto cleanup;
1578 } else {
1579 qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
1582 s->nr_cpus = nr_cpus;
1584 get_max_mapnr(s);
1586 uint64_t tmp;
1587 tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
1588 s->dump_info.page_size);
1589 s->len_dump_bitmap = tmp * s->dump_info.page_size;
1591 /* init for kdump-compressed format */
1592 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1593 switch (format) {
1594 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
1595 s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
1596 break;
1598 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
1599 #ifdef CONFIG_LZO
1600 if (lzo_init() != LZO_E_OK) {
1601 error_setg(errp, "failed to initialize the LZO library");
1602 goto cleanup;
1604 #endif
1605 s->flag_compress = DUMP_DH_COMPRESSED_LZO;
1606 break;
1608 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
1609 s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
1610 break;
1612 default:
1613 s->flag_compress = 0;
1616 return;
1619 if (s->has_filter) {
1620 memory_mapping_filter(&s->list, s->begin, s->length);
1624 * calculate phdr_num
1626 * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
1628 s->phdr_num = 1; /* PT_NOTE */
1629 if (s->list.num < UINT16_MAX - 2) {
1630 s->phdr_num += s->list.num;
1631 s->have_section = false;
1632 } else {
1633 s->have_section = true;
1634 s->phdr_num = PN_XNUM;
1635 s->sh_info = 1; /* PT_NOTE */
1637 /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
1638 if (s->list.num <= UINT32_MAX - 1) {
1639 s->sh_info += s->list.num;
1640 } else {
1641 s->sh_info = UINT32_MAX;
1645 if (s->dump_info.d_class == ELFCLASS64) {
1646 if (s->have_section) {
1647 s->memory_offset = sizeof(Elf64_Ehdr) +
1648 sizeof(Elf64_Phdr) * s->sh_info +
1649 sizeof(Elf64_Shdr) + s->note_size;
1650 } else {
1651 s->memory_offset = sizeof(Elf64_Ehdr) +
1652 sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
1654 } else {
1655 if (s->have_section) {
1656 s->memory_offset = sizeof(Elf32_Ehdr) +
1657 sizeof(Elf32_Phdr) * s->sh_info +
1658 sizeof(Elf32_Shdr) + s->note_size;
1659 } else {
1660 s->memory_offset = sizeof(Elf32_Ehdr) +
1661 sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
1665 return;
1667 cleanup:
1668 dump_cleanup(s);
1671 /* this operation might be time consuming. */
1672 static void dump_process(DumpState *s, Error **errp)
1674 Error *local_err = NULL;
1675 DumpQueryResult *result = NULL;
1677 if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1678 create_kdump_vmcore(s, &local_err);
1679 } else {
1680 create_vmcore(s, &local_err);
1683 /* make sure status is written after written_size updates */
1684 smp_wmb();
1685 atomic_set(&s->status,
1686 (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));
1688 /* send DUMP_COMPLETED message (unconditionally) */
1689 result = qmp_query_dump(NULL);
1690 /* should never fail */
1691 assert(result);
1692 qapi_event_send_dump_completed(result, !!local_err, (local_err ? \
1693 error_get_pretty(local_err) : NULL),
1694 &error_abort);
1695 qapi_free_DumpQueryResult(result);
1697 error_propagate(errp, local_err);
1698 dump_cleanup(s);
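/*
 * Thread entry point for detached dumps; errors are reported only through
 * the DUMP_COMPLETED event, not through a monitor Error.
 */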
1701 static void *dump_thread(void *data)
1703 DumpState *s = (DumpState *)data;
1704 dump_process(s, NULL);
1705 return NULL;
1708 DumpQueryResult *qmp_query_dump(Error **errp)
1710 DumpQueryResult *result = g_new(DumpQueryResult, 1);
1711 DumpState *state = &dump_state_global;
1712 result->status = atomic_read(&state->status);
1713 /* make sure we are reading status and written_size in order */
1714 smp_rmb();
1715 result->completed = state->written_size;
1716 result->total = state->total_size;
1717 return result;
1720 void qmp_dump_guest_memory(bool paging, const char *file,
1721 bool has_detach, bool detach,
1722 bool has_begin, int64_t begin, bool has_length,
1723 int64_t length, bool has_format,
1724 DumpGuestMemoryFormat format, Error **errp)
1726 const char *p;
1727 int fd = -1;
1728 DumpState *s;
1729 Error *local_err = NULL;
1730 bool detach_p = false;
1732 if (runstate_check(RUN_STATE_INMIGRATE)) {
1733 error_setg(errp, "Dump not allowed during incoming migration.");
1734 return;
 1737     /* if there is a dump running in the background, we should wait until it
 1738      * has finished */
1739 if (dump_in_progress()) {
1740 error_setg(errp, "There is a dump in process, please wait.");
1741 return;
 1745      * the kdump-compressed format needs the whole memory dumped, so paging
 1746      * and filtering are not supported here.
1748 if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
1749 (paging || has_begin || has_length)) {
1750 error_setg(errp, "kdump-compressed format doesn't support paging or "
1751 "filter");
1752 return;
1754 if (has_begin && !has_length) {
1755 error_setg(errp, QERR_MISSING_PARAMETER, "length");
1756 return;
1758 if (!has_begin && has_length) {
1759 error_setg(errp, QERR_MISSING_PARAMETER, "begin");
1760 return;
1762 if (has_detach) {
1763 detach_p = detach;
1766 /* check whether lzo/snappy is supported */
1767 #ifndef CONFIG_LZO
1768 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
1769 error_setg(errp, "kdump-lzo is not available now");
1770 return;
1772 #endif
1774 #ifndef CONFIG_SNAPPY
1775 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
1776 error_setg(errp, "kdump-snappy is not available now");
1777 return;
1779 #endif
1781 #if !defined(WIN32)
1782 if (strstart(file, "fd:", &p)) {
1783 fd = monitor_get_fd(cur_mon, p, errp);
1784 if (fd == -1) {
1785 return;
1788 #endif
1790 if (strstart(file, "file:", &p)) {
1791 fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
1792 if (fd < 0) {
1793 error_setg_file_open(errp, errno, p);
1794 return;
1798 if (fd == -1) {
1799 error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
1800 return;
1803 s = &dump_state_global;
1804 dump_state_prepare(s);
1806 dump_init(s, fd, has_format, format, paging, has_begin,
1807 begin, length, &local_err);
1808 if (local_err) {
1809 error_propagate(errp, local_err);
1810 atomic_set(&s->status, DUMP_STATUS_FAILED);
1811 return;
1814 if (detach_p) {
1815 /* detached dump */
1816 s->detached = true;
1817 qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
1818 s, QEMU_THREAD_DETACHED);
1819 } else {
1820 /* sync dump */
1821 dump_process(s, errp);
1825 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
1827 DumpGuestMemoryFormatList *item;
1828 DumpGuestMemoryCapability *cap =
1829 g_malloc0(sizeof(DumpGuestMemoryCapability));
1831 /* elf is always available */
1832 item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1833 cap->formats = item;
1834 item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;
1836 /* kdump-zlib is always available */
1837 item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1838 item = item->next;
1839 item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;
1841 /* add new item if kdump-lzo is available */
1842 #ifdef CONFIG_LZO
1843 item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1844 item = item->next;
1845 item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
1846 #endif
1848 /* add new item if kdump-snappy is available */
1849 #ifdef CONFIG_SNAPPY
1850 item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1851 item = item->next;
1852 item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
1853 #endif
1855 return cap;