/*
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu-common.h"
#include "exec/cpu-all.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qmp-commands.h"

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif
#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif
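/*
 * The byte-order helpers below convert values to the endianness recorded in
 * s->dump_info.d_endian, so every header field lands in the vmcore in the
 * guest's byte order rather than the host's.
 */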
uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}
static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    if (s->fd != -1) {
        close(s->fd);
    }
    if (s->resume) {
        vm_start();
    }

    return 0;
}
static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}
static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}
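/*
 * fd_write_vmcore matches the WriteCoreDumpFunction signature, so the same
 * per-CPU note writers can emit either straight to the vmcore fd (via this
 * function) or into the temporary note buffer (see buf_write_note below).
 */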
static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}
static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}
static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset, hwaddr filesz)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset, hwaddr filesz)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}
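/*
 * The ids handed to the per-CPU note writers below are 1-based: cpu_index()
 * maps QEMU's 0-based cpu_index to id 1 for the first CPU, so no note entry
 * carries an id of 0.
 */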
static int write_elf64_notes(WriteCoreDumpFunction f, DumpState *s)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}
static int write_elf32_note(DumpState *s)
{
    Elf32_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
static int write_elf32_notes(WriteCoreDumpFunction f, DumpState *s)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}
static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}
static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}
/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}
static int write_elf_loads(DumpState *s)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    int ret;
    uint32_t max_index;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset,
                                   filesz);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset,
                                   filesz);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}
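/*
 * write_elf_loads() above stops after max_index - 1 PT_LOAD entries. When the
 * mapping count would overflow the 16-bit e_phnum field, dump_init() sets
 * e_phnum to PN_XNUM and the real count is carried in the sh_info field of
 * section header 0, which write_elf_section() emits.
 */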
/* write elf header, PT_NOTE and elf note to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(fd_write_vmcore, s) < 0) {
            return -1;
        }
    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(fd_write_vmcore, s) < 0) {
            return -1;
        }
    }

    return 0;
}
/* the dump has finished: release resources and resume the guest if needed */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);

    return 0;
}
static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}
/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}
static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}
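/*
 * Everything below implements the kdump-compressed output path. The image is
 * wrapped in makedumpfile's "flat" format: each chunk is preceded by a
 * MakedumpfileDataHeader holding the chunk's intended file offset and size,
 * so the stream can be written strictly sequentially (e.g. to a pipe) and
 * rearranged into its final layout later with makedumpfile -R.
 */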
static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    size_t written_size;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}
static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;
    size_t written_size;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}
static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}
static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* note_buf is not enough */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}
/* write common header, sub header and elf note to vmcore */
static int create_header32(DumpState *s)
{
    int ret = 0;
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;

    /* write common header, the version of kdump-compressed format is 6th */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = TARGET_PAGE_SIZE;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME,
            sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        dump_error(s, "dump: failed to write disk dump header.\n");
        ret = -1;
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, PHYS_BASE);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        dump_error(s, "dump: failed to write kdump sub header.\n");
        ret = -1;
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    if (write_elf32_notes(buf_write_note, s) < 0) {
        ret = -1;
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        dump_error(s, "dump: failed to write notes");
        ret = -1;
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);

    return ret;
}
850 static int create_header64(DumpState
*s
)
853 DiskDumpHeader64
*dh
= NULL
;
854 KdumpSubHeader64
*kh
= NULL
;
857 uint32_t sub_hdr_size
;
858 uint32_t bitmap_blocks
;
860 uint64_t offset_note
;
862 /* write common header, the version of kdump-compressed format is 6th */
863 size
= sizeof(DiskDumpHeader64
);
864 dh
= g_malloc0(size
);
866 strncpy(dh
->signature
, KDUMP_SIGNATURE
, strlen(KDUMP_SIGNATURE
));
867 dh
->header_version
= cpu_to_dump32(s
, 6);
868 block_size
= TARGET_PAGE_SIZE
;
869 dh
->block_size
= cpu_to_dump32(s
, block_size
);
870 sub_hdr_size
= sizeof(struct KdumpSubHeader64
) + s
->note_size
;
871 sub_hdr_size
= DIV_ROUND_UP(sub_hdr_size
, block_size
);
872 dh
->sub_hdr_size
= cpu_to_dump32(s
, sub_hdr_size
);
873 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
874 dh
->max_mapnr
= cpu_to_dump32(s
, MIN(s
->max_mapnr
, UINT_MAX
));
875 dh
->nr_cpus
= cpu_to_dump32(s
, s
->nr_cpus
);
876 bitmap_blocks
= DIV_ROUND_UP(s
->len_dump_bitmap
, block_size
) * 2;
877 dh
->bitmap_blocks
= cpu_to_dump32(s
, bitmap_blocks
);
878 strncpy(dh
->utsname
.machine
, ELF_MACHINE_UNAME
, sizeof(dh
->utsname
.machine
));
880 if (s
->flag_compress
& DUMP_DH_COMPRESSED_ZLIB
) {
881 status
|= DUMP_DH_COMPRESSED_ZLIB
;
884 if (s
->flag_compress
& DUMP_DH_COMPRESSED_LZO
) {
885 status
|= DUMP_DH_COMPRESSED_LZO
;
889 if (s
->flag_compress
& DUMP_DH_COMPRESSED_SNAPPY
) {
890 status
|= DUMP_DH_COMPRESSED_SNAPPY
;
893 dh
->status
= cpu_to_dump32(s
, status
);
895 if (write_buffer(s
->fd
, 0, dh
, size
) < 0) {
896 dump_error(s
, "dump: failed to write disk dump header.\n");
901 /* write sub header */
902 size
= sizeof(KdumpSubHeader64
);
903 kh
= g_malloc0(size
);
905 /* 64bit max_mapnr_64 */
906 kh
->max_mapnr_64
= cpu_to_dump64(s
, s
->max_mapnr
);
907 kh
->phys_base
= cpu_to_dump64(s
, PHYS_BASE
);
908 kh
->dump_level
= cpu_to_dump32(s
, DUMP_LEVEL
);
910 offset_note
= DISKDUMP_HEADER_BLOCKS
* block_size
+ size
;
911 kh
->offset_note
= cpu_to_dump64(s
, offset_note
);
912 kh
->note_size
= cpu_to_dump64(s
, s
->note_size
);
914 if (write_buffer(s
->fd
, DISKDUMP_HEADER_BLOCKS
*
915 block_size
, kh
, size
) < 0) {
916 dump_error(s
, "dump: failed to write kdump sub header.\n");
922 s
->note_buf
= g_malloc0(s
->note_size
);
923 s
->note_buf_offset
= 0;
925 /* use s->note_buf to store notes temporarily */
926 if (write_elf64_notes(buf_write_note
, s
) < 0) {
931 if (write_buffer(s
->fd
, offset_note
, s
->note_buf
,
933 dump_error(s
, "dump: failed to write notes");
938 /* get offset of dump_bitmap */
939 s
->offset_dump_bitmap
= (DISKDUMP_HEADER_BLOCKS
+ sub_hdr_size
) *
942 /* get offset of page */
943 s
->offset_page
= (DISKDUMP_HEADER_BLOCKS
+ sub_hdr_size
+ bitmap_blocks
) *
static int write_dump_header(DumpState *s)
{
    if (s->dump_info.d_class == ELFCLASS32) {
        return create_header32(s);
    } else {
        return create_header64(s);
    }
}
/*
 * set the dump_bitmap sequentially. Bits before last_pfn are not allowed to
 * be rewritten, so to set the first bit, pass both last_pfn and pfn as 0.
 * set_dump_bitmap always leaves the most recently set bit un-synced. Setting
 * (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into the
 * vmcore, i.e. it synchronizes the un-synced bits into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit needed to be set is not cached in buf, flush the data in buf
     * to vmcore first.
     * making new_offset bigger than old_offset also syncs remaining data
     * into vmcore.
     */
    old_offset = BUFSIZE_BITMAP * (last_pfn / PFN_BUFBITMAP);
    new_offset = BUFSIZE_BITMAP * (pfn / PFN_BUFBITMAP);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         BUFSIZE_BITMAP) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         BUFSIZE_BITMAP) < 0) {
            return -1;
        }

        memset(buf, 0, BUFSIZE_BITMAP);
        old_offset += BUFSIZE_BITMAP;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % PFN_BUFBITMAP) / CHAR_BIT;
    bit = (pfn % PFN_BUFBITMAP) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}
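/*
 * Worked example for the bit placement above: a pfn whose offset within the
 * cached window (pfn % PFN_BUFBITMAP) is 19 lands in buf[2]
 * (19 / CHAR_BIT == 2) at bit 3 (19 % CHAR_BIT == 3).
 */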
/*
 * Examine every page and return the page frame number and the address of the
 * page. bufptr can be NULL. Note: the blocks here are supposed to reflect
 * guest-phys blocks, so block->target_start and block->target_end should be
 * integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr;
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~TARGET_PAGE_MASK) == 0);
        assert((block->target_end & ~TARGET_PAGE_MASK) == 0);
        *pfnptr = paddr_to_pfn(block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = pfn_to_paddr(*pfnptr);

    if ((addr >= block->target_start) &&
        (addr + TARGET_PAGE_SIZE <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~TARGET_PAGE_MASK) == 0);
        assert((block->target_end & ~TARGET_PAGE_MASK) == 0);
        *pfnptr = paddr_to_pfn(block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}
static int write_dump_bitmap(DumpState *s)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(BUFSIZE_BITMAP);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * examine memory page by page, and set the bit in dump_bitmap
     * corresponding to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to set dump_bitmap.\n");
            ret = -1;
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-synced. Here
     * we set last_pfn + PFN_BUFBITMAP to 0, so the set but un-synced bits are
     * synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + PFN_BUFBITMAP, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to sync dump_bitmap.\n");
            ret = -1;
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);

    return ret;
}
static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = BUFSIZE_DATA_CACHE;
    data_cache->buf = g_malloc0(BUFSIZE_DATA_CACHE);
    data_cache->offset = offset;
}
static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size should not be less than size, otherwise dc will never be
     * enough
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize data in dc->buf into vmcore.
     * otherwise check if the space is enough for caching data in buf, if not,
     * write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}
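/*
 * Batching writes through the cache matters here because every write_buffer()
 * call also emits a MakedumpfileDataHeader in the flat format; caching up to
 * BUFSIZE_DATA_CACHE bytes at a time keeps that per-chunk overhead off the
 * many small page descriptors and compressed pages.
 */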
static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}
static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

#ifdef CONFIG_LZO
    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;
#endif

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }

    return 0;
}
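/*
 * Example bound: for a 4 KiB page, the LZO worst case above is
 * 4096 + 4096 / 16 + 64 + 3 = 4419 bytes, slightly larger than the input,
 * since incompressible data expands a little.
 */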
/*
 * check if the page is all 0
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}
*s
)
1207 DataCache page_desc
, page_data
;
1208 size_t len_buf_out
, size_out
;
1210 lzo_bytep wrkmem
= NULL
;
1212 uint8_t *buf_out
= NULL
;
1213 off_t offset_desc
, offset_data
;
1214 PageDescriptor pd
, pd_zero
;
1216 GuestPhysBlock
*block_iter
= NULL
;
1219 /* get offset of page_desc and page_data in dump file */
1220 offset_desc
= s
->offset_page
;
1221 offset_data
= offset_desc
+ sizeof(PageDescriptor
) * s
->num_dumpable
;
1223 prepare_data_cache(&page_desc
, s
, offset_desc
);
1224 prepare_data_cache(&page_data
, s
, offset_data
);
1226 /* prepare buffer to store compressed data */
1227 len_buf_out
= get_len_buf_out(TARGET_PAGE_SIZE
, s
->flag_compress
);
1228 assert(len_buf_out
!= 0);
1231 wrkmem
= g_malloc(LZO1X_1_MEM_COMPRESS
);
1234 buf_out
= g_malloc(len_buf_out
);
1237 * init zero page's page_desc and page_data, because every zero page
1238 * uses the same page_data
1240 pd_zero
.size
= cpu_to_dump32(s
, TARGET_PAGE_SIZE
);
1241 pd_zero
.flags
= cpu_to_dump32(s
, 0);
1242 pd_zero
.offset
= cpu_to_dump64(s
, offset_data
);
1243 pd_zero
.page_flags
= cpu_to_dump64(s
, 0);
1244 buf
= g_malloc0(TARGET_PAGE_SIZE
);
1245 ret
= write_cache(&page_data
, buf
, TARGET_PAGE_SIZE
, false);
1248 dump_error(s
, "dump: failed to write page data(zero page).\n");
1252 offset_data
+= TARGET_PAGE_SIZE
;
1255 * dump memory to vmcore page by page. zero page will all be resided in the
1256 * first page of page section
1258 while (get_next_page(&block_iter
, &pfn_iter
, &buf
, s
)) {
1259 /* check zero page */
1260 if (is_zero_page(buf
, TARGET_PAGE_SIZE
)) {
1261 ret
= write_cache(&page_desc
, &pd_zero
, sizeof(PageDescriptor
),
1264 dump_error(s
, "dump: failed to write page desc.\n");
1269 * not zero page, then:
1270 * 1. compress the page
1271 * 2. write the compressed page into the cache of page_data
1272 * 3. get page desc of the compressed page and write it into the
1273 * cache of page_desc
1275 * only one compression format will be used here, for
1276 * s->flag_compress is set. But when compression fails to work,
1277 * we fall back to save in plaintext.
1279 size_out
= len_buf_out
;
1280 if ((s
->flag_compress
& DUMP_DH_COMPRESSED_ZLIB
) &&
1281 (compress2(buf_out
, (uLongf
*)&size_out
, buf
,
1282 TARGET_PAGE_SIZE
, Z_BEST_SPEED
) == Z_OK
) &&
1283 (size_out
< TARGET_PAGE_SIZE
)) {
1284 pd
.flags
= cpu_to_dump32(s
, DUMP_DH_COMPRESSED_ZLIB
);
1285 pd
.size
= cpu_to_dump32(s
, size_out
);
1287 ret
= write_cache(&page_data
, buf_out
, size_out
, false);
1289 dump_error(s
, "dump: failed to write page data.\n");
1293 } else if ((s
->flag_compress
& DUMP_DH_COMPRESSED_LZO
) &&
1294 (lzo1x_1_compress(buf
, TARGET_PAGE_SIZE
, buf_out
,
1295 (lzo_uint
*)&size_out
, wrkmem
) == LZO_E_OK
) &&
1296 (size_out
< TARGET_PAGE_SIZE
)) {
1297 pd
.flags
= cpu_to_dump32(s
, DUMP_DH_COMPRESSED_LZO
);
1298 pd
.size
= cpu_to_dump32(s
, size_out
);
1300 ret
= write_cache(&page_data
, buf_out
, size_out
, false);
1302 dump_error(s
, "dump: failed to write page data.\n");
1306 #ifdef CONFIG_SNAPPY
1307 } else if ((s
->flag_compress
& DUMP_DH_COMPRESSED_SNAPPY
) &&
1308 (snappy_compress((char *)buf
, TARGET_PAGE_SIZE
,
1309 (char *)buf_out
, &size_out
) == SNAPPY_OK
) &&
1310 (size_out
< TARGET_PAGE_SIZE
)) {
1311 pd
.flags
= cpu_to_dump32(s
, DUMP_DH_COMPRESSED_SNAPPY
);
1312 pd
.size
= cpu_to_dump32(s
, size_out
);
1314 ret
= write_cache(&page_data
, buf_out
, size_out
, false);
1316 dump_error(s
, "dump: failed to write page data.\n");
1322 * fall back to save in plaintext, size_out should be
1323 * assigned TARGET_PAGE_SIZE
1325 pd
.flags
= cpu_to_dump32(s
, 0);
1326 size_out
= TARGET_PAGE_SIZE
;
1327 pd
.size
= cpu_to_dump32(s
, size_out
);
1329 ret
= write_cache(&page_data
, buf
, TARGET_PAGE_SIZE
, false);
1331 dump_error(s
, "dump: failed to write page data.\n");
1336 /* get and write page desc here */
1337 pd
.page_flags
= cpu_to_dump64(s
, 0);
1338 pd
.offset
= cpu_to_dump64(s
, offset_data
);
1339 offset_data
+= size_out
;
1341 ret
= write_cache(&page_desc
, &pd
, sizeof(PageDescriptor
), false);
1343 dump_error(s
, "dump: failed to write page desc.\n");
1349 ret
= write_cache(&page_desc
, NULL
, 0, true);
1351 dump_error(s
, "dump: failed to sync cache for page_desc.\n");
1354 ret
= write_cache(&page_data
, NULL
, 0, true);
1356 dump_error(s
, "dump: failed to sync cache for page_data.\n");
1361 free_data_cache(&page_desc
);
1362 free_data_cache(&page_data
);
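/*
 * Note the zero-page optimization above: the single zero-filled page written
 * at the start of the page section is shared, so each additional zero page
 * costs only one PageDescriptor pointing at it.
 */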
static int create_kdump_vmcore(DumpState *s)
{
    int ret;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                    :                     |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                    :                     |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        dump_error(s, "dump: failed to write start flat header.\n");
        return -1;
    }

    ret = write_dump_header(s);
    if (ret < 0) {
        return -1;
    }

    ret = write_dump_bitmap(s);
    if (ret < 0) {
        return -1;
    }

    ret = write_dump_pages(s);
    if (ret < 0) {
        return -1;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        dump_error(s, "dump: failed to write end flat header.\n");
        return -1;
    }

    dump_completed(s);

    return 0;
}
static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}
static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
    s->max_mapnr = paddr_to_pfn(last_block->target_end);
}
static int dump_init(DumpState *s, int fd, bool has_format,
                     DumpGuestMemoryFormat format, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    uint64_t tmp;
    int ret;

    /* kdump-compressed conflicts with paging and filter */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);

    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), TARGET_PAGE_SIZE);
    s->len_dump_bitmap = tmp * TARGET_PAGE_SIZE;

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return 0;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;

cleanup:
    guest_phys_blocks_free(&s->guest_phys_blocks);

    if (s->resume) {
        vm_start();
    }

    return -1;
}
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    /*
     * the kdump-compressed format needs the whole memory dumped, so paging or
     * filter is not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc0(sizeof(DumpState));

    ret = dump_init(s, fd, has_format, format, paging, has_begin,
                    begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        if (create_kdump_vmcore(s) < 0) {
            error_set(errp, QERR_IO_ERROR);
        }
    } else {
        if (create_vmcore(s) < 0) {
            error_set(errp, QERR_IO_ERROR);
        }
    }

    g_free(s);
}
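/*
 * A minimal QMP usage sketch for the handler above (the file path is an
 * example, not part of the code):
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false,
 *                    "protocol": "file:/tmp/vmcore",
 *                    "format": "kdump-zlib" } }
 */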
DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryFormatList *item;
    DumpGuestMemoryCapability *cap =
                                  g_malloc0(sizeof(DumpGuestMemoryCapability));

    /* elf is always available */
    item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    cap->formats = item;
    item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;

    /* kdump-zlib is always available */
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
#endif

    return cap;
}