/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "elf.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qmp-commands.h"

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif
#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

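/*
 * These helpers emit values in the dump's byte order: d_endian is taken from
 * the guest's dump info (filled in by cpu_get_dump_info() in dump_init()),
 * so e.g. cpu_to_dump32(s, EV_CURRENT) behaves as cpu_to_le32() for a
 * little-endian guest and as cpu_to_be32() for a big-endian one.
 */
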
static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    if (s->resume) {
        vm_start();
    }

    return 0;
}

/* the error reason is currently discarded; dump_error() only cleans up */
static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}

static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset,
                            hwaddr filesz)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

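/*
 * Note that p_filesz can be smaller than p_memsz when get_offset_range()
 * clamps the mapping to its GuestPhysBlock; per the ELF gABI, the remainder
 * of such a segment is zero-filled in memory when the core is loaded (see
 * the comment in get_offset_range() below).
 */
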
static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset,
                            hwaddr filesz)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

static int write_elf64_notes(WriteCoreDumpFunction f, DumpState *s)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf32_note(DumpState *s)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_notes(WriteCoreDumpFunction f, DumpState *s)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

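/*
 * When the number of program headers would overflow the 16-bit e_phnum
 * field, e_phnum is set to PN_XNUM and the real count is carried in the
 * sh_info field of a single section header, which the function below
 * writes (see the phdr_num/sh_info calculation in dump_init()).
 */
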
/* type == 0 selects an ELF32 section header, otherwise ELF64 */
static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}

static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}

/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored in the vmcore, the offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}

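/*
 * Illustrative example: for a block covering guest-physical addresses
 * [0x0, 0x100000) stored at file offset s->memory_offset, a request for
 * phys_addr 0xff000 with mapping_length 0x2000 yields
 * *p_offset = s->memory_offset + 0xff000 and *p_filesz clamped to 0x1000,
 * because only one page of the mapping lies inside the block.
 */
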
static int write_elf_loads(DumpState *s)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    int ret;
    uint32_t max_index;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset,
                                   filesz);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset,
                                   filesz);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(fd_write_vmcore, s) < 0) {
            return -1;
        }

    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(fd_write_vmcore, s) < 0) {
            return -1;
        }
    }

    return 0;
}

/* the dump has completed; clean up and resume the VM if it was running */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}

static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}

static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    size_t written_size;
    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    size_t written_size;
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

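/*
 * Each chunk written by write_buffer() is framed by a MakedumpfileDataHeader
 * carrying the big-endian (offset, size) pair. This "flat" framing lets the
 * dump be produced on a non-seekable fd; a post-processing step (e.g.
 * makedumpfile's reassembly option) can later rearrange the chunks into a
 * normal, seekable kdump-compressed file.
 */
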
static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* not enough space left in note_buf */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}

/* write common header, sub header and elf note to vmcore */
static int create_header32(DumpState *s)
{
    int ret = 0;
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;

    /* write common header; version 6 of the kdump-compressed format is used */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = TARGET_PAGE_SIZE;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME,
            sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        dump_error(s, "dump: failed to write disk dump header.\n");
        ret = -1;
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, PHYS_BASE);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        dump_error(s, "dump: failed to write kdump sub header.\n");
        ret = -1;
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    if (write_elf32_notes(buf_write_note, s) < 0) {
        ret = -1;
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        dump_error(s, "dump: failed to write notes.\n");
        ret = -1;
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);

    return ret;
}

/* write common header, sub header and elf note to vmcore */
static int create_header64(DumpState *s)
{
    int ret = 0;
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;

    /* write common header; version 6 of the kdump-compressed format is used */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = TARGET_PAGE_SIZE;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME,
            sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        dump_error(s, "dump: failed to write disk dump header.\n");
        ret = -1;
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, PHYS_BASE);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        dump_error(s, "dump: failed to write kdump sub header.\n");
        ret = -1;
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    if (write_elf64_notes(buf_write_note, s) < 0) {
        ret = -1;
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        dump_error(s, "dump: failed to write notes.\n");
        ret = -1;
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);

    return ret;
}

static int write_dump_header(DumpState *s)
{
    if (s->dump_info.d_class == ELFCLASS32) {
        return create_header32(s);
    } else {
        return create_header64(s);
    }
}

/*
 * Set the dump_bitmap sequentially. Bits before last_pfn must not be
 * rewritten, so to set the very first bit, pass both last_pfn and pfn as 0.
 * set_dump_bitmap always leaves the most recently set bit un-synced; setting
 * bit (last bit + sizeof(buf) * 8) to 0 flushes the contents of buf into the
 * vmcore, i.e. it synchronizes the un-synced bits into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit that needs to be set is not cached in buf, flush the data
     * in buf to the vmcore first. Making new_offset bigger than old_offset
     * can also sync remaining data into the vmcore.
     */
    old_offset = BUFSIZE_BITMAP * (last_pfn / PFN_BUFBITMAP);
    new_offset = BUFSIZE_BITMAP * (pfn / PFN_BUFBITMAP);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         BUFSIZE_BITMAP) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         BUFSIZE_BITMAP) < 0) {
            return -1;
        }

        memset(buf, 0, BUFSIZE_BITMAP);
        old_offset += BUFSIZE_BITMAP;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % PFN_BUFBITMAP) / CHAR_BIT;
    bit = (pfn % PFN_BUFBITMAP) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}

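/*
 * Illustrative arithmetic, assuming BUFSIZE_BITMAP is one 4 KiB block: then
 * PFN_BUFBITMAP is 4096 * 8 = 32768 pfns per buffer, so going from
 * last_pfn = 32767 to pfn = 32768 crosses a buffer boundary, flushing buf to
 * both bitmap copies before the new bit is set in the cleared buffer.
 */
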
/*
 * Examine every page and return the page frame number and the address of
 * the page. bufptr may be NULL. Note: the blocks here are supposed to
 * reflect guest-phys blocks, so block->target_start and block->target_end
 * should be integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr;
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~TARGET_PAGE_MASK) == 0);
        assert((block->target_end & ~TARGET_PAGE_MASK) == 0);
        *pfnptr = paddr_to_pfn(block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = pfn_to_paddr(*pfnptr);

    if ((addr >= block->target_start) &&
        (addr + TARGET_PAGE_SIZE <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~TARGET_PAGE_MASK) == 0);
        assert((block->target_end & ~TARGET_PAGE_MASK) == 0);
        *pfnptr = paddr_to_pfn(block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}

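/*
 * Typical iteration pattern, as used by write_dump_bitmap() and
 * write_dump_pages() below:
 *
 *     GuestPhysBlock *block = NULL;
 *     uint64_t pfn;
 *     uint8_t *buf;
 *
 *     while (get_next_page(&block, &pfn, &buf, s)) {
 *         ... process one TARGET_PAGE_SIZE page at host address buf ...
 *     }
 */
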
static int write_dump_bitmap(DumpState *s)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(BUFSIZE_BITMAP);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * examine memory page by page, and set the bit in the dump_bitmap
     * corresponding to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to set dump_bitmap.\n");
            ret = -1;
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap always leaves the most recently set bit un-synced.
     * Here we set bit (last_pfn + PFN_BUFBITMAP) to 0, so the set but
     * un-synced bits are flushed into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + PFN_BUFBITMAP, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to sync dump_bitmap.\n");
            ret = -1;
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);

    return ret;
}

static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = BUFSIZE_DATA_CACHE;
    data_cache->buf = g_malloc0(BUFSIZE_DATA_CACHE);
    data_cache->offset = offset;
}

static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size must not be less than size, otherwise the cache can
     * never hold the data
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize the data in dc->buf into the vmcore.
     * otherwise check whether there is enough room to cache the data in
     * buf; if not, write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

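/*
 * Callers flush any bytes still sitting in the cache with a final
 * synchronizing call, write_cache(dc, NULL, 0, true), as done at the end of
 * write_dump_pages().
 */
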
static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}

static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}

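/*
 * Worked example, assuming a 4 KiB page: the LZO worst case above is
 * 4096 + 4096 / 16 + 64 + 3 = 4419 bytes, and zlib's compressBound(4096)
 * is likewise only slightly larger than the page itself.
 */
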
/*
 * check if the page is all 0
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}

static int write_dump_pages(DumpState *s)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(TARGET_PAGE_SIZE, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init zero page's page_desc and page_data, because every zero page
     * shares the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, TARGET_PAGE_SIZE);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(TARGET_PAGE_SIZE);
    ret = write_cache(&page_data, buf, TARGET_PAGE_SIZE, false);
    g_free(buf);
    if (ret < 0) {
        dump_error(s, "dump: failed to write page data (zero page).\n");
        goto out;
    }

    offset_data += TARGET_PAGE_SIZE;

    /*
     * dump memory to vmcore page by page; all zero pages share the single
     * copy that resides in the first page of the page section
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, TARGET_PAGE_SIZE)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                dump_error(s, "dump: failed to write page desc.\n");
                goto out;
            }
        } else {
            /*
             * not a zero page, so:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get the page desc of the compressed page and write it into
             *    the cache of page_desc
             *
             * Only one compression format is used here, since
             * s->flag_compress is set. When compression fails, we fall back
             * to saving the page in plaintext.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf,
                           TARGET_PAGE_SIZE, Z_BEST_SPEED) == Z_OK) &&
                (size_out < TARGET_PAGE_SIZE)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data.\n");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                       (lzo1x_1_compress(buf, TARGET_PAGE_SIZE, buf_out,
                        (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                       (size_out < TARGET_PAGE_SIZE)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data.\n");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                       (snappy_compress((char *)buf, TARGET_PAGE_SIZE,
                        (char *)buf_out, &size_out) == SNAPPY_OK) &&
                       (size_out < TARGET_PAGE_SIZE)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data.\n");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to saving in plaintext; size_out must be
                 * set to TARGET_PAGE_SIZE
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = TARGET_PAGE_SIZE;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf, TARGET_PAGE_SIZE, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data.\n");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                dump_error(s, "dump: failed to write page desc.\n");
                goto out;
            }
        }
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        dump_error(s, "dump: failed to sync cache for page_desc.\n");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        dump_error(s, "dump: failed to sync cache for page_data.\n");
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);

#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif

    g_free(buf_out);

    return ret;
}

static int create_kdump_vmcore(DumpState *s)
{
    int ret;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                  :                       |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                  :                       |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        dump_error(s, "dump: failed to write start flat header.\n");
        return -1;
    }

    ret = write_dump_header(s);
    if (ret < 0) {
        return -1;
    }

    ret = write_dump_bitmap(s);
    if (ret < 0) {
        return -1;
    }

    ret = write_dump_pages(s);
    if (ret < 0) {
        return -1;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        dump_error(s, "dump: failed to write end flat header.\n");
        return -1;
    }

    dump_completed(s);

    return 0;
}

static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
    s->max_mapnr = paddr_to_pfn(last_block->target_end);
}

static int dump_init(DumpState *s, int fd, bool has_format,
                     DumpGuestMemoryFormat format, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    /* the kdump-compressed format conflicts with paging and filtering */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    memory_mapping_list_init(&s->list);

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* get memory mapping */
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);

    uint64_t tmp;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), TARGET_PAGE_SIZE);
    s->len_dump_bitmap = tmp * TARGET_PAGE_SIZE;

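    /*
     * Worked example, assuming 4 KiB target pages and a 4 GiB guest:
     * max_mapnr is 1Mi pfns, one bit per pfn gives 128 KiB, which is already
     * a multiple of TARGET_PAGE_SIZE, so len_dump_bitmap is 128 KiB per
     * bitmap copy.
     */
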
    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return 0;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;

cleanup:
    dump_cleanup(s);
    return -1;
}

void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    /*
     * the kdump-compressed format needs the whole memory dumped, so paging
     * and filtering are not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc0(sizeof(DumpState));

    ret = dump_init(s, fd, has_format, format, paging, has_begin,
                    begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        if (create_kdump_vmcore(s) < 0) {
            error_set(errp, QERR_IO_ERROR);
        }
    } else {
        if (create_vmcore(s) < 0) {
            error_set(errp, QERR_IO_ERROR);
        }
    }

    g_free(s);
}

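/*
 * Example QMP invocation (the file path is hypothetical):
 *
 * { "execute": "dump-guest-memory",
 *   "arguments": { "paging": false,
 *                  "protocol": "file:/tmp/vmcore",
 *                  "format": "kdump-zlib" } }
 */
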
DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryFormatList *item;
    DumpGuestMemoryCapability *cap =
                                  g_malloc0(sizeof(DumpGuestMemoryCapability));

    /* elf is always available */
    item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    cap->formats = item;
    item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;

    /* kdump-zlib is always available */
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
#endif

    return cap;
}