dump: Propagate errors into qmp_dump_guest_memory()
[qemu/ar7.git] / dump.c
blob 07d2300c222fcb4723eed8a52d1bc02aa4571c21
1 /*
2 * QEMU dump
4 * Copyright Fujitsu, Corp. 2011, 2012
6 * Authors:
7 * Wen Congyang <wency@cn.fujitsu.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
14 #include "qemu-common.h"
15 #include "elf.h"
16 #include "cpu.h"
17 #include "exec/cpu-all.h"
18 #include "exec/hwaddr.h"
19 #include "monitor/monitor.h"
20 #include "sysemu/kvm.h"
21 #include "sysemu/dump.h"
22 #include "sysemu/sysemu.h"
23 #include "sysemu/memory_mapping.h"
24 #include "sysemu/cpus.h"
25 #include "qapi/error.h"
26 #include "qmp-commands.h"
28 #include <zlib.h>
29 #ifdef CONFIG_LZO
30 #include <lzo/lzo1x.h>
31 #endif
32 #ifdef CONFIG_SNAPPY
33 #include <snappy-c.h>
34 #endif
35 #ifndef ELF_MACHINE_UNAME
36 #define ELF_MACHINE_UNAME "Unknown"
37 #endif
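/*
 * The cpu_to_dumpNN() helpers below convert a value from host byte order to
 * the byte order recorded for the dump (s->dump_info.d_endian), so the vmcore
 * is laid out correctly regardless of the host endianness.
 */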
39 uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
41 if (s->dump_info.d_endian == ELFDATA2LSB) {
42 val = cpu_to_le16(val);
43 } else {
44 val = cpu_to_be16(val);
47 return val;
50 uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
52 if (s->dump_info.d_endian == ELFDATA2LSB) {
53 val = cpu_to_le32(val);
54 } else {
55 val = cpu_to_be32(val);
58 return val;
61 uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
63 if (s->dump_info.d_endian == ELFDATA2LSB) {
64 val = cpu_to_le64(val);
65 } else {
66 val = cpu_to_be64(val);
69 return val;
72 static int dump_cleanup(DumpState *s)
74 guest_phys_blocks_free(&s->guest_phys_blocks);
75 memory_mapping_list_free(&s->list);
76 close(s->fd);
77 if (s->resume) {
78 vm_start();
81 return 0;
84 static void dump_error(DumpState *s, const char *reason, Error **errp)
86 dump_cleanup(s);
87 error_setg(errp, "%s", reason);
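/*
 * WriteCoreDumpFunction callback: write a buffer to the dump file descriptor.
 * Returns 0 on success, -1 on a short or failed write.
 */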
90 static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
92 DumpState *s = opaque;
93 size_t written_size;
95 written_size = qemu_write_full(s->fd, buf, size);
96 if (written_size != size) {
97 return -1;
100 return 0;
103 static int write_elf64_header(DumpState *s, Error **errp)
105 Elf64_Ehdr elf_header;
106 int ret;
108 memset(&elf_header, 0, sizeof(Elf64_Ehdr));
109 memcpy(&elf_header, ELFMAG, SELFMAG);
110 elf_header.e_ident[EI_CLASS] = ELFCLASS64;
111 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
112 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
113 elf_header.e_type = cpu_to_dump16(s, ET_CORE);
114 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
115 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
116 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
117 elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
118 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
119 elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
120 if (s->have_section) {
121 uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;
123 elf_header.e_shoff = cpu_to_dump64(s, shoff);
124 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
125 elf_header.e_shnum = cpu_to_dump16(s, 1);
128 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
129 if (ret < 0) {
130 dump_error(s, "dump: failed to write elf header", errp);
131 return -1;
134 return 0;
137 static int write_elf32_header(DumpState *s, Error **errp)
139 Elf32_Ehdr elf_header;
140 int ret;
142 memset(&elf_header, 0, sizeof(Elf32_Ehdr));
143 memcpy(&elf_header, ELFMAG, SELFMAG);
144 elf_header.e_ident[EI_CLASS] = ELFCLASS32;
145 elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
146 elf_header.e_ident[EI_VERSION] = EV_CURRENT;
147 elf_header.e_type = cpu_to_dump16(s, ET_CORE);
148 elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
149 elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
150 elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
151 elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
152 elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
153 elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
154 if (s->have_section) {
155 uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;
157 elf_header.e_shoff = cpu_to_dump32(s, shoff);
158 elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
159 elf_header.e_shnum = cpu_to_dump16(s, 1);
162 ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
163 if (ret < 0) {
164 dump_error(s, "dump: failed to write elf header", errp);
165 return -1;
168 return 0;
171 static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
172 int phdr_index, hwaddr offset,
173 hwaddr filesz, Error **errp)
175 Elf64_Phdr phdr;
176 int ret;
178 memset(&phdr, 0, sizeof(Elf64_Phdr));
179 phdr.p_type = cpu_to_dump32(s, PT_LOAD);
180 phdr.p_offset = cpu_to_dump64(s, offset);
181 phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
182 phdr.p_filesz = cpu_to_dump64(s, filesz);
183 phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
184 phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);
186 assert(memory_mapping->length >= filesz);
188 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
189 if (ret < 0) {
190 dump_error(s, "dump: failed to write program header table", errp);
191 return -1;
194 return 0;
197 static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
198 int phdr_index, hwaddr offset,
199 hwaddr filesz, Error **errp)
201 Elf32_Phdr phdr;
202 int ret;
204 memset(&phdr, 0, sizeof(Elf32_Phdr));
205 phdr.p_type = cpu_to_dump32(s, PT_LOAD);
206 phdr.p_offset = cpu_to_dump32(s, offset);
207 phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
208 phdr.p_filesz = cpu_to_dump32(s, filesz);
209 phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
210 phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);
212 assert(memory_mapping->length >= filesz);
214 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
215 if (ret < 0) {
216 dump_error(s, "dump: failed to write program header table", errp);
217 return -1;
220 return 0;
223 static int write_elf64_note(DumpState *s, Error **errp)
225 Elf64_Phdr phdr;
226 hwaddr begin = s->memory_offset - s->note_size;
227 int ret;
229 memset(&phdr, 0, sizeof(Elf64_Phdr));
230 phdr.p_type = cpu_to_dump32(s, PT_NOTE);
231 phdr.p_offset = cpu_to_dump64(s, begin);
232 phdr.p_paddr = 0;
233 phdr.p_filesz = cpu_to_dump64(s, s->note_size);
234 phdr.p_memsz = cpu_to_dump64(s, s->note_size);
235 phdr.p_vaddr = 0;
237 ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
238 if (ret < 0) {
239 dump_error(s, "dump: failed to write program header table", errp);
240 return -1;
243 return 0;
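/* The dump format numbers CPUs starting from 1, hence the +1 below. */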
246 static inline int cpu_index(CPUState *cpu)
248 return cpu->cpu_index + 1;
251 static int write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
252 Error **errp)
254 CPUState *cpu;
255 int ret;
256 int id;
258 CPU_FOREACH(cpu) {
259 id = cpu_index(cpu);
260 ret = cpu_write_elf64_note(f, cpu, id, s);
261 if (ret < 0) {
262 dump_error(s, "dump: failed to write elf notes", errp);
263 return -1;
267 CPU_FOREACH(cpu) {
268 ret = cpu_write_elf64_qemunote(f, cpu, s);
269 if (ret < 0) {
270 dump_error(s, "dump: failed to write CPU status", errp);
271 return -1;
275 return 0;
278 static int write_elf32_note(DumpState *s, Error **errp)
280 hwaddr begin = s->memory_offset - s->note_size;
281 Elf32_Phdr phdr;
282 int ret;
284 memset(&phdr, 0, sizeof(Elf32_Phdr));
285 phdr.p_type = cpu_to_dump32(s, PT_NOTE);
286 phdr.p_offset = cpu_to_dump32(s, begin);
287 phdr.p_paddr = 0;
288 phdr.p_filesz = cpu_to_dump32(s, s->note_size);
289 phdr.p_memsz = cpu_to_dump32(s, s->note_size);
290 phdr.p_vaddr = 0;
292 ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
293 if (ret < 0) {
294 dump_error(s, "dump: failed to write program header table", errp);
295 return -1;
298 return 0;
301 static int write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
302 Error **errp)
304 CPUState *cpu;
305 int ret;
306 int id;
308 CPU_FOREACH(cpu) {
309 id = cpu_index(cpu);
310 ret = cpu_write_elf32_note(f, cpu, id, s);
311 if (ret < 0) {
312 dump_error(s, "dump: failed to write elf notes", errp);
313 return -1;
317 CPU_FOREACH(cpu) {
318 ret = cpu_write_elf32_qemunote(f, cpu, s);
319 if (ret < 0) {
320 dump_error(s, "dump: failed to write CPU status", errp);
321 return -1;
325 return 0;
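/*
 * Write the single section header that carries the real program header count
 * in sh_info when it does not fit into e_phnum (the PN_XNUM case); type 0
 * selects an Elf32_Shdr, anything else an Elf64_Shdr.
 */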
328 static int write_elf_section(DumpState *s, int type, Error **errp)
330 Elf32_Shdr shdr32;
331 Elf64_Shdr shdr64;
332 int shdr_size;
333 void *shdr;
334 int ret;
336 if (type == 0) {
337 shdr_size = sizeof(Elf32_Shdr);
338 memset(&shdr32, 0, shdr_size);
339 shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
340 shdr = &shdr32;
341 } else {
342 shdr_size = sizeof(Elf64_Shdr);
343 memset(&shdr64, 0, shdr_size);
344 shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
345 shdr = &shdr64;
348 ret = fd_write_vmcore(shdr, shdr_size, s);
349 if (ret < 0) {
350 dump_error(s, "dump: failed to write section header table", errp);
351 return -1;
354 return 0;
357 static int write_data(DumpState *s, void *buf, int length, Error **errp)
359 int ret;
361 ret = fd_write_vmcore(buf, length, s);
362 if (ret < 0) {
363 dump_error(s, "dump: failed to save memory", errp);
364 return -1;
367 return 0;
371 /* write the memory to vmcore. 1 page per I/O. */
371 static int write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
372 int64_t size, Error **errp)
374 int64_t i;
375 int ret;
377 for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
378 ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
379 TARGET_PAGE_SIZE, errp);
380 if (ret < 0) {
381 return ret;
385 if ((size % TARGET_PAGE_SIZE) != 0) {
386 ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
387 size % TARGET_PAGE_SIZE, errp);
388 if (ret < 0) {
389 return ret;
393 return 0;
396 /* get the memory's offset and size in the vmcore */
397 static void get_offset_range(hwaddr phys_addr,
398 ram_addr_t mapping_length,
399 DumpState *s,
400 hwaddr *p_offset,
401 hwaddr *p_filesz)
403 GuestPhysBlock *block;
404 hwaddr offset = s->memory_offset;
405 int64_t size_in_block, start;
407 /* When the memory is not stored into vmcore, offset will be -1 */
408 *p_offset = -1;
409 *p_filesz = 0;
411 if (s->has_filter) {
412 if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
413 return;
417 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
418 if (s->has_filter) {
419 if (block->target_start >= s->begin + s->length ||
420 block->target_end <= s->begin) {
421 /* This block is out of the range */
422 continue;
425 if (s->begin <= block->target_start) {
426 start = block->target_start;
427 } else {
428 start = s->begin;
431 size_in_block = block->target_end - start;
432 if (s->begin + s->length < block->target_end) {
433 size_in_block -= block->target_end - (s->begin + s->length);
435 } else {
436 start = block->target_start;
437 size_in_block = block->target_end - block->target_start;
440 if (phys_addr >= start && phys_addr < start + size_in_block) {
441 *p_offset = phys_addr - start + offset;
443 /* The offset range mapped from the vmcore file must not spill over
444 * the GuestPhysBlock, clamp it. The rest of the mapping will be
445 * zero-filled in memory at load time; see
446 * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
448 *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
449 mapping_length :
450 size_in_block - (phys_addr - start);
451 return;
454 offset += size_in_block;
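/* Write one PT_LOAD program header for every guest memory mapping. */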
458 static int write_elf_loads(DumpState *s, Error **errp)
460 hwaddr offset, filesz;
461 MemoryMapping *memory_mapping;
462 uint32_t phdr_index = 1;
463 int ret;
464 uint32_t max_index;
466 if (s->have_section) {
467 max_index = s->sh_info;
468 } else {
469 max_index = s->phdr_num;
472 QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
473 get_offset_range(memory_mapping->phys_addr,
474 memory_mapping->length,
475 s, &offset, &filesz);
476 if (s->dump_info.d_class == ELFCLASS64) {
477 ret = write_elf64_load(s, memory_mapping, phdr_index++, offset,
478 filesz, errp);
479 } else {
480 ret = write_elf32_load(s, memory_mapping, phdr_index++, offset,
481 filesz, errp);
484 if (ret < 0) {
485 return -1;
488 if (phdr_index >= max_index) {
489 break;
493 return 0;
496 /* write elf header, PT_NOTE and elf note to vmcore. */
497 static int dump_begin(DumpState *s, Error **errp)
499 int ret;
502 * the vmcore's format is:
503 * --------------
504 * | elf header |
505 * --------------
506 * | PT_NOTE |
507 * --------------
508 * | PT_LOAD |
509 * --------------
510 * | ...... |
511 * --------------
512 * | PT_LOAD |
513 * --------------
514 * | sec_hdr |
515 * --------------
516 * | elf note |
517 * --------------
518 * | memory |
519 * --------------
521 * we only know where the memory is saved after we write elf note into
522 * vmcore.
525 /* write elf header to vmcore */
526 if (s->dump_info.d_class == ELFCLASS64) {
527 ret = write_elf64_header(s, errp);
528 } else {
529 ret = write_elf32_header(s, errp);
531 if (ret < 0) {
532 return -1;
535 if (s->dump_info.d_class == ELFCLASS64) {
536 /* write PT_NOTE to vmcore */
537 if (write_elf64_note(s, errp) < 0) {
538 return -1;
541 /* write all PT_LOAD to vmcore */
542 if (write_elf_loads(s, errp) < 0) {
543 return -1;
546 /* write section to vmcore */
547 if (s->have_section) {
548 if (write_elf_section(s, 1, errp) < 0) {
549 return -1;
553 /* write notes to vmcore */
554 if (write_elf64_notes(fd_write_vmcore, s, errp) < 0) {
555 return -1;
558 } else {
559 /* write PT_NOTE to vmcore */
560 if (write_elf32_note(s, errp) < 0) {
561 return -1;
564 /* write all PT_LOAD to vmcore */
565 if (write_elf_loads(s, errp) < 0) {
566 return -1;
569 /* write section to vmcore */
570 if (s->have_section) {
571 if (write_elf_section(s, 0, errp) < 0) {
572 return -1;
576 /* write notes to vmcore */
577 if (write_elf32_notes(fd_write_vmcore, s, errp) < 0) {
578 return -1;
582 return 0;
585 /* the dump is complete: release resources and resume the VM if needed */
586 static int dump_completed(DumpState *s)
588 dump_cleanup(s);
589 return 0;
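/*
 * Advance to the next guest-phys block that intersects the dump filter.
 * Returns 1 when no block is left, 0 otherwise; on success s->next_block and
 * s->start are updated for the caller.
 */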
592 static int get_next_block(DumpState *s, GuestPhysBlock *block)
594 while (1) {
595 block = QTAILQ_NEXT(block, next);
596 if (!block) {
597 /* no more block */
598 return 1;
601 s->start = 0;
602 s->next_block = block;
603 if (s->has_filter) {
604 if (block->target_start >= s->begin + s->length ||
605 block->target_end <= s->begin) {
606 /* This block is out of the range */
607 continue;
610 if (s->begin > block->target_start) {
611 s->start = s->begin - block->target_start;
615 return 0;
619 /* write all memory to vmcore */
620 static int dump_iterate(DumpState *s, Error **errp)
622 GuestPhysBlock *block;
623 int64_t size;
624 int ret;
626 while (1) {
627 block = s->next_block;
629 size = block->target_end - block->target_start;
630 if (s->has_filter) {
631 size -= s->start;
632 if (s->begin + s->length < block->target_end) {
633 size -= block->target_end - (s->begin + s->length);
636 ret = write_memory(s, block, s->start, size, errp);
637 if (ret == -1) {
638 return ret;
641 ret = get_next_block(s, block);
642 if (ret == 1) {
643 dump_completed(s);
644 return 0;
649 static int create_vmcore(DumpState *s, Error **errp)
651 int ret;
653 ret = dump_begin(s, errp);
654 if (ret < 0) {
655 return -1;
658 ret = dump_iterate(s, errp);
659 if (ret < 0) {
660 return -1;
663 return 0;
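/*
 * Write the makedumpfile flat-format start header: a MakedumpfileHeader,
 * padded to MAX_SIZE_MDF_HEADER bytes, with type and version stored big
 * endian.
 */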
666 static int write_start_flat_header(int fd)
668 MakedumpfileHeader *mh;
669 int ret = 0;
671 QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
672 mh = g_malloc0(MAX_SIZE_MDF_HEADER);
674 memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
675 MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));
677 mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
678 mh->version = cpu_to_be64(VERSION_FLAT_HEADER);
680 size_t written_size;
681 written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
682 if (written_size != MAX_SIZE_MDF_HEADER) {
683 ret = -1;
686 g_free(mh);
687 return ret;
690 static int write_end_flat_header(int fd)
692 MakedumpfileDataHeader mdh;
694 mdh.offset = END_FLAG_FLAT_HEADER;
695 mdh.buf_size = END_FLAG_FLAT_HEADER;
697 size_t written_size;
698 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
699 if (written_size != sizeof(mdh)) {
700 return -1;
703 return 0;
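/*
 * Write one flat-format data segment: a MakedumpfileDataHeader describing the
 * (offset, size) of the payload in the reassembled dump, followed by the
 * payload itself.
 */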
706 static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
708 size_t written_size;
709 MakedumpfileDataHeader mdh;
711 mdh.offset = cpu_to_be64(offset);
712 mdh.buf_size = cpu_to_be64(size);
714 written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
715 if (written_size != sizeof(mdh)) {
716 return -1;
719 written_size = qemu_write_full(fd, buf, size);
720 if (written_size != size) {
721 return -1;
724 return 0;
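/*
 * WriteCoreDumpFunction callback that accumulates ELF notes into s->note_buf
 * instead of writing them to the file directly.
 */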
727 static int buf_write_note(const void *buf, size_t size, void *opaque)
729 DumpState *s = opaque;
731 /* note_buf is not large enough */
732 if (s->note_buf_offset + size > s->note_size) {
733 return -1;
736 memcpy(s->note_buf + s->note_buf_offset, buf, size);
738 s->note_buf_offset += size;
740 return 0;
743 /* write common header, sub header and elf note to vmcore */
744 static int create_header32(DumpState *s, Error **errp)
746 int ret = 0;
747 DiskDumpHeader32 *dh = NULL;
748 KdumpSubHeader32 *kh = NULL;
749 size_t size;
750 uint32_t block_size;
751 uint32_t sub_hdr_size;
752 uint32_t bitmap_blocks;
753 uint32_t status = 0;
754 uint64_t offset_note;
756 /* write the common header; the kdump-compressed format version is 6 */
757 size = sizeof(DiskDumpHeader32);
758 dh = g_malloc0(size);
760 strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
761 dh->header_version = cpu_to_dump32(s, 6);
762 block_size = TARGET_PAGE_SIZE;
763 dh->block_size = cpu_to_dump32(s, block_size);
764 sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
765 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
766 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
767 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
768 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
769 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
770 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
771 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
772 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
774 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
775 status |= DUMP_DH_COMPRESSED_ZLIB;
777 #ifdef CONFIG_LZO
778 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
779 status |= DUMP_DH_COMPRESSED_LZO;
781 #endif
782 #ifdef CONFIG_SNAPPY
783 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
784 status |= DUMP_DH_COMPRESSED_SNAPPY;
786 #endif
787 dh->status = cpu_to_dump32(s, status);
789 if (write_buffer(s->fd, 0, dh, size) < 0) {
790 dump_error(s, "dump: failed to write disk dump header", errp);
791 ret = -1;
792 goto out;
795 /* write sub header */
796 size = sizeof(KdumpSubHeader32);
797 kh = g_malloc0(size);
799 /* 64bit max_mapnr_64 */
800 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
801 kh->phys_base = cpu_to_dump32(s, PHYS_BASE);
802 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
804 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
805 kh->offset_note = cpu_to_dump64(s, offset_note);
806 kh->note_size = cpu_to_dump32(s, s->note_size);
808 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
809 block_size, kh, size) < 0) {
810 dump_error(s, "dump: failed to write kdump sub header", errp);
811 ret = -1;
812 goto out;
815 /* write note */
816 s->note_buf = g_malloc0(s->note_size);
817 s->note_buf_offset = 0;
819 /* use s->note_buf to store notes temporarily */
820 if (write_elf32_notes(buf_write_note, s, errp) < 0) {
821 ret = -1;
822 goto out;
825 if (write_buffer(s->fd, offset_note, s->note_buf,
826 s->note_size) < 0) {
827 dump_error(s, "dump: failed to write notes", errp);
828 ret = -1;
829 goto out;
832 /* get offset of dump_bitmap */
833 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
834 block_size;
836 /* get offset of page */
837 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
838 block_size;
840 out:
841 g_free(dh);
842 g_free(kh);
843 g_free(s->note_buf);
845 return ret;
848 /* write common header, sub header and elf note to vmcore */
849 static int create_header64(DumpState *s, Error **errp)
851 int ret = 0;
852 DiskDumpHeader64 *dh = NULL;
853 KdumpSubHeader64 *kh = NULL;
854 size_t size;
855 uint32_t block_size;
856 uint32_t sub_hdr_size;
857 uint32_t bitmap_blocks;
858 uint32_t status = 0;
859 uint64_t offset_note;
861 /* write the common header; the kdump-compressed format version is 6 */
862 size = sizeof(DiskDumpHeader64);
863 dh = g_malloc0(size);
865 strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
866 dh->header_version = cpu_to_dump32(s, 6);
867 block_size = TARGET_PAGE_SIZE;
868 dh->block_size = cpu_to_dump32(s, block_size);
869 sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
870 sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
871 dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
872 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
873 dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
874 dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
875 bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
876 dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
877 strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
879 if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
880 status |= DUMP_DH_COMPRESSED_ZLIB;
882 #ifdef CONFIG_LZO
883 if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
884 status |= DUMP_DH_COMPRESSED_LZO;
886 #endif
887 #ifdef CONFIG_SNAPPY
888 if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
889 status |= DUMP_DH_COMPRESSED_SNAPPY;
891 #endif
892 dh->status = cpu_to_dump32(s, status);
894 if (write_buffer(s->fd, 0, dh, size) < 0) {
895 dump_error(s, "dump: failed to write disk dump header", errp);
896 ret = -1;
897 goto out;
900 /* write sub header */
901 size = sizeof(KdumpSubHeader64);
902 kh = g_malloc0(size);
904 /* 64bit max_mapnr_64 */
905 kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
906 kh->phys_base = cpu_to_dump64(s, PHYS_BASE);
907 kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
909 offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
910 kh->offset_note = cpu_to_dump64(s, offset_note);
911 kh->note_size = cpu_to_dump64(s, s->note_size);
913 if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
914 block_size, kh, size) < 0) {
915 dump_error(s, "dump: failed to write kdump sub header", errp);
916 ret = -1;
917 goto out;
920 /* write note */
921 s->note_buf = g_malloc0(s->note_size);
922 s->note_buf_offset = 0;
924 /* use s->note_buf to store notes temporarily */
925 if (write_elf64_notes(buf_write_note, s, errp) < 0) {
926 ret = -1;
927 goto out;
930 if (write_buffer(s->fd, offset_note, s->note_buf,
931 s->note_size) < 0) {
932 dump_error(s, "dump: failed to write notes", errp);
933 ret = -1;
934 goto out;
937 /* get offset of dump_bitmap */
938 s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
939 block_size;
941 /* get offset of page */
942 s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
943 block_size;
945 out:
946 g_free(dh);
947 g_free(kh);
948 g_free(s->note_buf);
950 return ret;
953 static int write_dump_header(DumpState *s, Error **errp)
955 if (s->dump_info.d_class == ELFCLASS32) {
956 return create_header32(s, errp);
957 } else {
958 return create_header64(s, errp);
963 * Set dump_bitmap sequentially. The bits before last_pfn must not be
964 * rewritten, so to set the first bit, set both last_pfn and pfn to 0.
965 * set_dump_bitmap always leaves the most recently set bit unsynchronized;
966 * setting (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into
967 * the vmcore, i.e. synchronizes the unsynchronized bits.
969 static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
970 uint8_t *buf, DumpState *s)
972 off_t old_offset, new_offset;
973 off_t offset_bitmap1, offset_bitmap2;
974 uint32_t byte, bit;
976 /* should not set the previous place */
977 assert(last_pfn <= pfn);
980 * If the bit to be set is not cached in buf, first flush the data in buf
981 * to the vmcore.
982 * Making new_offset larger than old_offset also syncs the remaining data
983 * into the vmcore.
985 old_offset = BUFSIZE_BITMAP * (last_pfn / PFN_BUFBITMAP);
986 new_offset = BUFSIZE_BITMAP * (pfn / PFN_BUFBITMAP);
988 while (old_offset < new_offset) {
989 /* calculate the offset and write dump_bitmap */
990 offset_bitmap1 = s->offset_dump_bitmap + old_offset;
991 if (write_buffer(s->fd, offset_bitmap1, buf,
992 BUFSIZE_BITMAP) < 0) {
993 return -1;
996 /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
997 offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
998 old_offset;
999 if (write_buffer(s->fd, offset_bitmap2, buf,
1000 BUFSIZE_BITMAP) < 0) {
1001 return -1;
1004 memset(buf, 0, BUFSIZE_BITMAP);
1005 old_offset += BUFSIZE_BITMAP;
1008 /* get the exact place of the bit in the buf, and set it */
1009 byte = (pfn % PFN_BUFBITMAP) / CHAR_BIT;
1010 bit = (pfn % PFN_BUFBITMAP) % CHAR_BIT;
1011 if (value) {
1012 buf[byte] |= 1u << bit;
1013 } else {
1014 buf[byte] &= ~(1u << bit);
1017 return 0;
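/*
 * A worked sketch of set_dump_bitmap() above (assuming PFN_BUFBITMAP is the
 * number of bits that fit in buf, i.e. BUFSIZE_BITMAP * CHAR_BIT): if
 * last_pfn and pfn fall into the same PFN_BUFBITMAP-sized window, nothing is
 * flushed and only pfn's bit is updated in buf; if pfn lies one or more
 * windows further on, each intervening window is written to both bitmaps and
 * buf is cleared before the new bit is set.
 */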
1021 * Examine every page and return the page frame number and the address of
1022 * the page. bufptr can be NULL. Note: the blocks here are supposed to
1023 * reflect guest-phys blocks, so block->target_start and block->target_end
1024 * should be integral multiples of the target page size.
1026 static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
1027 uint8_t **bufptr, DumpState *s)
1029 GuestPhysBlock *block = *blockptr;
1030 hwaddr addr;
1031 uint8_t *buf;
1033 /* block == NULL means the start of the iteration */
1034 if (!block) {
1035 block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1036 *blockptr = block;
1037 assert((block->target_start & ~TARGET_PAGE_MASK) == 0);
1038 assert((block->target_end & ~TARGET_PAGE_MASK) == 0);
1039 *pfnptr = paddr_to_pfn(block->target_start);
1040 if (bufptr) {
1041 *bufptr = block->host_addr;
1043 return true;
1046 *pfnptr = *pfnptr + 1;
1047 addr = pfn_to_paddr(*pfnptr);
1049 if ((addr >= block->target_start) &&
1050 (addr + TARGET_PAGE_SIZE <= block->target_end)) {
1051 buf = block->host_addr + (addr - block->target_start);
1052 } else {
1053 /* the next page is in the next block */
1054 block = QTAILQ_NEXT(block, next);
1055 *blockptr = block;
1056 if (!block) {
1057 return false;
1059 assert((block->target_start & ~TARGET_PAGE_MASK) == 0);
1060 assert((block->target_end & ~TARGET_PAGE_MASK) == 0);
1061 *pfnptr = paddr_to_pfn(block->target_start);
1062 buf = block->host_addr;
1065 if (bufptr) {
1066 *bufptr = buf;
1069 return true;
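/*
 * Build the two dump bitmaps (identical here, since dump level 1 is used) by
 * walking every guest page and marking it as dumpable.
 */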
1072 static int write_dump_bitmap(DumpState *s, Error **errp)
1074 int ret = 0;
1075 uint64_t last_pfn, pfn;
1076 void *dump_bitmap_buf;
1077 size_t num_dumpable;
1078 GuestPhysBlock *block_iter = NULL;
1080 /* dump_bitmap_buf is used to store dump_bitmap temporarily */
1081 dump_bitmap_buf = g_malloc0(BUFSIZE_BITMAP);
1083 num_dumpable = 0;
1084 last_pfn = 0;
1087 * Examine memory page by page, and set the bit in dump_bitmap
1088 * corresponding to each existing page.
1090 while (get_next_page(&block_iter, &pfn, NULL, s)) {
1091 ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
1092 if (ret < 0) {
1093 dump_error(s, "dump: failed to set dump_bitmap", errp);
1094 ret = -1;
1095 goto out;
1098 last_pfn = pfn;
1099 num_dumpable++;
1103 * set_dump_bitmap always leaves the most recently set bit unsynchronized.
1104 * Here we set last_pfn + PFN_BUFBITMAP to 0, so the set-but-unsynchronized
1105 * bits are flushed into the vmcore.
1107 if (num_dumpable > 0) {
1108 ret = set_dump_bitmap(last_pfn, last_pfn + PFN_BUFBITMAP, false,
1109 dump_bitmap_buf, s);
1110 if (ret < 0) {
1111 dump_error(s, "dump: failed to sync dump_bitmap", errp);
1112 ret = -1;
1113 goto out;
1117 /* number of dumpable pages that will be dumped later */
1118 s->num_dumpable = num_dumpable;
1120 out:
1121 g_free(dump_bitmap_buf);
1123 return ret;
1126 static void prepare_data_cache(DataCache *data_cache, DumpState *s,
1127 off_t offset)
1129 data_cache->fd = s->fd;
1130 data_cache->data_size = 0;
1131 data_cache->buf_size = BUFSIZE_DATA_CACHE;
1132 data_cache->buf = g_malloc0(BUFSIZE_DATA_CACHE);
1133 data_cache->offset = offset;
1136 static int write_cache(DataCache *dc, const void *buf, size_t size,
1137 bool flag_sync)
1140 * dc->buf_size must not be less than size, otherwise dc->buf can never
1141 * hold the data
1143 assert(size <= dc->buf_size);
1146 * If flag_sync is set, synchronize the data in dc->buf into the vmcore.
1147 * Otherwise check whether there is enough space to cache the data in buf;
1148 * if not, write the data in dc->buf to dc->fd and reset dc->buf.
1150 if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
1151 (flag_sync && dc->data_size > 0)) {
1152 if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
1153 return -1;
1156 dc->offset += dc->data_size;
1157 dc->data_size = 0;
1160 if (!flag_sync) {
1161 memcpy(dc->buf + dc->data_size, buf, size);
1162 dc->data_size += size;
1165 return 0;
1168 static void free_data_cache(DataCache *data_cache)
1170 g_free(data_cache->buf);
1173 static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
1175 switch (flag_compress) {
1176 case DUMP_DH_COMPRESSED_ZLIB:
1177 return compressBound(page_size);
1179 case DUMP_DH_COMPRESSED_LZO:
1181 * LZO will expand incompressible data by a little amount. Please check
1182 * the following URL to see the expansion calculation:
1183 * http://www.oberhumer.com/opensource/lzo/lzofaq.php
1185 return page_size + page_size / 16 + 64 + 3;
1187 #ifdef CONFIG_SNAPPY
1188 case DUMP_DH_COMPRESSED_SNAPPY:
1189 return snappy_max_compressed_length(page_size);
1190 #endif
1192 return 0;
1196 * check if the page is all 0
1198 static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
1200 return buffer_is_zero(buf, page_size);
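/*
 * Write the page descriptor and page data sections: zero pages all share one
 * pre-written data page, other pages are compressed (or stored uncompressed
 * if compression fails) and appended to the page data cache.
 */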
1203 static int write_dump_pages(DumpState *s, Error **errp)
1205 int ret = 0;
1206 DataCache page_desc, page_data;
1207 size_t len_buf_out, size_out;
1208 #ifdef CONFIG_LZO
1209 lzo_bytep wrkmem = NULL;
1210 #endif
1211 uint8_t *buf_out = NULL;
1212 off_t offset_desc, offset_data;
1213 PageDescriptor pd, pd_zero;
1214 uint8_t *buf;
1215 GuestPhysBlock *block_iter = NULL;
1216 uint64_t pfn_iter;
1218 /* get offset of page_desc and page_data in dump file */
1219 offset_desc = s->offset_page;
1220 offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;
1222 prepare_data_cache(&page_desc, s, offset_desc);
1223 prepare_data_cache(&page_data, s, offset_data);
1225 /* prepare buffer to store compressed data */
1226 len_buf_out = get_len_buf_out(TARGET_PAGE_SIZE, s->flag_compress);
1227 assert(len_buf_out != 0);
1229 #ifdef CONFIG_LZO
1230 wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
1231 #endif
1233 buf_out = g_malloc(len_buf_out);
1236 * Initialize the zero page's page_desc and page_data; every zero page
1237 * shares the same page_data
1239 pd_zero.size = cpu_to_dump32(s, TARGET_PAGE_SIZE);
1240 pd_zero.flags = cpu_to_dump32(s, 0);
1241 pd_zero.offset = cpu_to_dump64(s, offset_data);
1242 pd_zero.page_flags = cpu_to_dump64(s, 0);
1243 buf = g_malloc0(TARGET_PAGE_SIZE);
1244 ret = write_cache(&page_data, buf, TARGET_PAGE_SIZE, false);
1245 g_free(buf);
1246 if (ret < 0) {
1247 dump_error(s, "dump: failed to write page data (zero page)", errp);
1248 goto out;
1251 offset_data += TARGET_PAGE_SIZE;
1254 * Dump memory to the vmcore page by page. Zero pages all reside in the
1255 * first page of the page section
1257 while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
1258 /* check zero page */
1259 if (is_zero_page(buf, TARGET_PAGE_SIZE)) {
1260 ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
1261 false);
1262 if (ret < 0) {
1263 dump_error(s, "dump: failed to write page desc", errp);
1264 goto out;
1266 } else {
1268 * not a zero page, so:
1269 * 1. compress the page
1270 * 2. write the compressed page into the page_data cache
1271 * 3. build the page desc of the compressed page and write it into the
1272 *    page_desc cache
1274 * Only one compression format is used here, since s->flag_compress is
1275 * set. If compression fails, we fall back to saving the page
1276 * uncompressed.
1278 size_out = len_buf_out;
1279 if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
1280 (compress2(buf_out, (uLongf *)&size_out, buf,
1281 TARGET_PAGE_SIZE, Z_BEST_SPEED) == Z_OK) &&
1282 (size_out < TARGET_PAGE_SIZE)) {
1283 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
1284 pd.size = cpu_to_dump32(s, size_out);
1286 ret = write_cache(&page_data, buf_out, size_out, false);
1287 if (ret < 0) {
1288 dump_error(s, "dump: failed to write page data", errp);
1289 goto out;
1291 #ifdef CONFIG_LZO
1292 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
1293 (lzo1x_1_compress(buf, TARGET_PAGE_SIZE, buf_out,
1294 (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
1295 (size_out < TARGET_PAGE_SIZE)) {
1296 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
1297 pd.size = cpu_to_dump32(s, size_out);
1299 ret = write_cache(&page_data, buf_out, size_out, false);
1300 if (ret < 0) {
1301 dump_error(s, "dump: failed to write page data", errp);
1302 goto out;
1304 #endif
1305 #ifdef CONFIG_SNAPPY
1306 } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
1307 (snappy_compress((char *)buf, TARGET_PAGE_SIZE,
1308 (char *)buf_out, &size_out) == SNAPPY_OK) &&
1309 (size_out < TARGET_PAGE_SIZE)) {
1310 pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
1311 pd.size = cpu_to_dump32(s, size_out);
1313 ret = write_cache(&page_data, buf_out, size_out, false);
1314 if (ret < 0) {
1315 dump_error(s, "dump: failed to write page data", errp);
1316 goto out;
1318 #endif
1319 } else {
1321 * fall back to saving the page uncompressed; size_out must be set
1322 * to TARGET_PAGE_SIZE
1324 pd.flags = cpu_to_dump32(s, 0);
1325 size_out = TARGET_PAGE_SIZE;
1326 pd.size = cpu_to_dump32(s, size_out);
1328 ret = write_cache(&page_data, buf, TARGET_PAGE_SIZE, false);
1329 if (ret < 0) {
1330 dump_error(s, "dump: failed to write page data", errp);
1331 goto out;
1335 /* get and write page desc here */
1336 pd.page_flags = cpu_to_dump64(s, 0);
1337 pd.offset = cpu_to_dump64(s, offset_data);
1338 offset_data += size_out;
1340 ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
1341 if (ret < 0) {
1342 dump_error(s, "dump: failed to write page desc", errp);
1343 goto out;
1348 ret = write_cache(&page_desc, NULL, 0, true);
1349 if (ret < 0) {
1350 dump_error(s, "dump: failed to sync cache for page_desc", errp);
1351 goto out;
1353 ret = write_cache(&page_data, NULL, 0, true);
1354 if (ret < 0) {
1355 dump_error(s, "dump: failed to sync cache for page_data", errp);
1356 goto out;
1359 out:
1360 free_data_cache(&page_desc);
1361 free_data_cache(&page_data);
1363 #ifdef CONFIG_LZO
1364 g_free(wrkmem);
1365 #endif
1367 g_free(buf_out);
1369 return ret;
1372 static int create_kdump_vmcore(DumpState *s, Error **errp)
1374 int ret;
1377 * the kdump-compressed format is:
1378 * File offset
1379 * +------------------------------------------+ 0x0
1380 * | main header (struct disk_dump_header) |
1381 * |------------------------------------------+ block 1
1382 * | sub header (struct kdump_sub_header) |
1383 * |------------------------------------------+ block 2
1384 * | 1st-dump_bitmap |
1385 * |------------------------------------------+ block 2 + X blocks
1386 * | 2nd-dump_bitmap | (aligned by block)
1387 * |------------------------------------------+ block 2 + 2 * X blocks
1388 * | page desc for pfn 0 (struct page_desc) | (aligned by block)
1389 * | page desc for pfn 1 (struct page_desc) |
1390 * | : |
1391 * |------------------------------------------| (not aligned by block)
1392 * | page data (pfn 0) |
1393 * | page data (pfn 1) |
1394 * | : |
1395 * +------------------------------------------+
1398 ret = write_start_flat_header(s->fd);
1399 if (ret < 0) {
1400 dump_error(s, "dump: failed to write start flat header", errp);
1401 return -1;
1404 ret = write_dump_header(s, errp);
1405 if (ret < 0) {
1406 return -1;
1409 ret = write_dump_bitmap(s, errp);
1410 if (ret < 0) {
1411 return -1;
1414 ret = write_dump_pages(s, errp);
1415 if (ret < 0) {
1416 return -1;
1419 ret = write_end_flat_header(s->fd);
1420 if (ret < 0) {
1421 dump_error(s, "dump: failed to write end flat header", errp);
1422 return -1;
1425 dump_completed(s);
1427 return 0;
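/*
 * Pick the first guest-phys block to dump, honouring the optional filter.
 * Returns the offset into that block of the first byte to dump, or -1 if the
 * filter range does not intersect any block.
 */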
1430 static ram_addr_t get_start_block(DumpState *s)
1432 GuestPhysBlock *block;
1434 if (!s->has_filter) {
1435 s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
1436 return 0;
1439 QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
1440 if (block->target_start >= s->begin + s->length ||
1441 block->target_end <= s->begin) {
1442 /* This block is out of the range */
1443 continue;
1446 s->next_block = block;
1447 if (s->begin > block->target_start) {
1448 s->start = s->begin - block->target_start;
1449 } else {
1450 s->start = 0;
1452 return s->start;
1455 return -1;
1458 static void get_max_mapnr(DumpState *s)
1460 GuestPhysBlock *last_block;
1462 last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
1463 s->max_mapnr = paddr_to_pfn(last_block->target_end);
1466 static int dump_init(DumpState *s, int fd, bool has_format,
1467 DumpGuestMemoryFormat format, bool paging, bool has_filter,
1468 int64_t begin, int64_t length, Error **errp)
1470 CPUState *cpu;
1471 int nr_cpus;
1472 Error *err = NULL;
1473 int ret;
1475 /* kdump-compressed conflicts with paging and filtering */
1476 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1477 assert(!paging && !has_filter);
1480 if (runstate_is_running()) {
1481 vm_stop(RUN_STATE_SAVE_VM);
1482 s->resume = true;
1483 } else {
1484 s->resume = false;
1487 /* If we use KVM, we should synchronize the registers before we get dump
1488 * info or physmap info.
1490 cpu_synchronize_all_states();
1491 nr_cpus = 0;
1492 CPU_FOREACH(cpu) {
1493 nr_cpus++;
1496 s->fd = fd;
1497 s->has_filter = has_filter;
1498 s->begin = begin;
1499 s->length = length;
1501 memory_mapping_list_init(&s->list);
1503 guest_phys_blocks_init(&s->guest_phys_blocks);
1504 guest_phys_blocks_append(&s->guest_phys_blocks);
1506 s->start = get_start_block(s);
1507 if (s->start == -1) {
1508 error_set(errp, QERR_INVALID_PARAMETER, "begin");
1509 goto cleanup;
1512 /* get dump info: endian, class and architecture.
1513 * If the target architecture is not supported, cpu_get_dump_info() will
1514 * return -1.
1516 ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
1517 if (ret < 0) {
1518 error_set(errp, QERR_UNSUPPORTED);
1519 goto cleanup;
1522 s->note_size = cpu_get_note_size(s->dump_info.d_class,
1523 s->dump_info.d_machine, nr_cpus);
1524 if (s->note_size < 0) {
1525 error_set(errp, QERR_UNSUPPORTED);
1526 goto cleanup;
1529 /* get memory mapping */
1530 if (paging) {
1531 qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
1532 if (err != NULL) {
1533 error_propagate(errp, err);
1534 goto cleanup;
1536 } else {
1537 qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
1540 s->nr_cpus = nr_cpus;
1542 get_max_mapnr(s);
1544 uint64_t tmp;
1545 tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), TARGET_PAGE_SIZE);
1546 s->len_dump_bitmap = tmp * TARGET_PAGE_SIZE;
1548 /* init for kdump-compressed format */
1549 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1550 switch (format) {
1551 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
1552 s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
1553 break;
1555 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
1556 #ifdef CONFIG_LZO
1557 if (lzo_init() != LZO_E_OK) {
1558 error_setg(errp, "failed to initialize the LZO library");
1559 goto cleanup;
1561 #endif
1562 s->flag_compress = DUMP_DH_COMPRESSED_LZO;
1563 break;
1565 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
1566 s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
1567 break;
1569 default:
1570 s->flag_compress = 0;
1573 return 0;
1576 if (s->has_filter) {
1577 memory_mapping_filter(&s->list, s->begin, s->length);
1581 * calculate phdr_num
1583 * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
1585 s->phdr_num = 1; /* PT_NOTE */
1586 if (s->list.num < UINT16_MAX - 2) {
1587 s->phdr_num += s->list.num;
1588 s->have_section = false;
1589 } else {
1590 s->have_section = true;
1591 s->phdr_num = PN_XNUM;
1592 s->sh_info = 1; /* PT_NOTE */
1594 /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
1595 if (s->list.num <= UINT32_MAX - 1) {
1596 s->sh_info += s->list.num;
1597 } else {
1598 s->sh_info = UINT32_MAX;
1602 if (s->dump_info.d_class == ELFCLASS64) {
1603 if (s->have_section) {
1604 s->memory_offset = sizeof(Elf64_Ehdr) +
1605 sizeof(Elf64_Phdr) * s->sh_info +
1606 sizeof(Elf64_Shdr) + s->note_size;
1607 } else {
1608 s->memory_offset = sizeof(Elf64_Ehdr) +
1609 sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
1611 } else {
1612 if (s->have_section) {
1613 s->memory_offset = sizeof(Elf32_Ehdr) +
1614 sizeof(Elf32_Phdr) * s->sh_info +
1615 sizeof(Elf32_Shdr) + s->note_size;
1616 } else {
1617 s->memory_offset = sizeof(Elf32_Ehdr) +
1618 sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
1622 return 0;
1624 cleanup:
1625 dump_cleanup(s);
1626 return -1;
1629 void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
1630 int64_t begin, bool has_length,
1631 int64_t length, bool has_format,
1632 DumpGuestMemoryFormat format, Error **errp)
1634 const char *p;
1635 int fd = -1;
1636 DumpState *s;
1637 int ret;
1640 * The kdump-compressed format needs the whole of memory dumped, so paging
1641 * and filtering are not supported here.
1643 if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
1644 (paging || has_begin || has_length)) {
1645 error_setg(errp, "kdump-compressed format doesn't support paging or "
1646 "filter");
1647 return;
1649 if (has_begin && !has_length) {
1650 error_set(errp, QERR_MISSING_PARAMETER, "length");
1651 return;
1653 if (!has_begin && has_length) {
1654 error_set(errp, QERR_MISSING_PARAMETER, "begin");
1655 return;
1658 /* check whether lzo/snappy is supported */
1659 #ifndef CONFIG_LZO
1660 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
1661 error_setg(errp, "kdump-lzo is not available now");
1662 return;
1664 #endif
1666 #ifndef CONFIG_SNAPPY
1667 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
1668 error_setg(errp, "kdump-snappy is not available now");
1669 return;
1671 #endif
1673 #if !defined(WIN32)
1674 if (strstart(file, "fd:", &p)) {
1675 fd = monitor_get_fd(cur_mon, p, errp);
1676 if (fd == -1) {
1677 return;
1680 #endif
1682 if (strstart(file, "file:", &p)) {
1683 fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
1684 if (fd < 0) {
1685 error_setg_file_open(errp, errno, p);
1686 return;
1690 if (fd == -1) {
1691 error_set(errp, QERR_INVALID_PARAMETER, "protocol");
1692 return;
1695 s = g_malloc0(sizeof(DumpState));
1697 ret = dump_init(s, fd, has_format, format, paging, has_begin,
1698 begin, length, errp);
1699 if (ret < 0) {
1700 g_free(s);
1701 return;
1704 if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
1705 create_kdump_vmcore(s, errp);
1706 } else {
1707 create_vmcore(s, errp);
1710 g_free(s);
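/*
 * A minimal usage sketch (argument names assumed from the QAPI schema of this
 * tree): the command above is typically driven over QMP as
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false,
 *                    "protocol": "file:/tmp/vmcore",
 *                    "format": "kdump-zlib" } }
 *
 * where "protocol" corresponds to the 'file' argument here ("fd:NAME" or
 * "file:PATH") and "format" is optional, defaulting to a plain ELF vmcore.
 */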
1713 DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
1715 DumpGuestMemoryFormatList *item;
1716 DumpGuestMemoryCapability *cap =
1717 g_malloc0(sizeof(DumpGuestMemoryCapability));
1719 /* elf is always available */
1720 item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1721 cap->formats = item;
1722 item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;
1724 /* kdump-zlib is always available */
1725 item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1726 item = item->next;
1727 item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;
1729 /* add new item if kdump-lzo is available */
1730 #ifdef CONFIG_LZO
1731 item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1732 item = item->next;
1733 item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
1734 #endif
1736 /* add new item if kdump-snappy is available */
1737 #ifdef CONFIG_SNAPPY
1738 item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
1739 item = item->next;
1740 item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
1741 #endif
1743 return cap;
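/*
 * Sketch of the matching QMP query (command name assumed from the function
 * above): "query-dump-guest-memory-capability" returns an object whose
 * "formats" list always contains "elf" and "kdump-zlib", plus "kdump-lzo"
 * and/or "kdump-snappy" when the corresponding libraries were configured in.
 */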