/*
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Author: Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "elf.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-dump.h"
#include "qapi/qapi-events-dump.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/misc/vmcoreinfo.h"
#include "migration/blocker.h"

#ifdef TARGET_X86_64
#include "win_dump.h"
#endif

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif
#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

#define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */
static Error *dump_migration_blocker;
#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size)   \
    ((DIV_ROUND_UP((hdr_size), 4) +                     \
      DIV_ROUND_UP((name_size), 4) +                    \
      DIV_ROUND_UP((desc_size), 4)) * 4)
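/*
 * Illustrative example (not in the original source): for a 12-byte note
 * header, a 5-byte name and a 7-byte descriptor, each part is rounded up
 * to a multiple of 4, so ELF_NOTE_SIZE(12, 5, 7) evaluates to
 * (3 + 2 + 2) * 4 = 28 bytes.
 */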
static inline bool dump_is_64bit(DumpState *s)
{
    return s->dump_info.d_class == ELFCLASS64;
}
static inline bool dump_has_filter(DumpState *s)
{
    return s->filter_area_length > 0;
}
uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}
uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}
uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}
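/*
 * The three helpers above convert a host value into the byte order recorded
 * in s->dump_info.d_endian (the guest's endianness), so vmcore fields come
 * out correctly regardless of the host byte order.
 */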
static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    g_free(s->guest_note);
    s->guest_note = NULL;
    if (s->resume) {
        if (s->detached) {
            qemu_mutex_lock_iothread();
        }
        vm_start();
        if (s->detached) {
            qemu_mutex_unlock_iothread();
        }
    }
    migrate_del_blocker(dump_migration_blocker);

    return 0;
}
static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -errno;
    }

    return 0;
}
static void prepare_elf64_header(DumpState *s, Elf64_Ehdr *elf_header)
{
    /*
     * phnum in the elf header is 16 bit, if we have more segments we
     * set phnum to PN_XNUM and write the real number of segments to a
     * special section.
     */
    uint16_t phnum = MIN(s->phdr_num, PN_XNUM);

    memset(elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(elf_header, ELFMAG, SELFMAG);
    elf_header->e_ident[EI_CLASS] = ELFCLASS64;
    elf_header->e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header->e_ident[EI_VERSION] = EV_CURRENT;
    elf_header->e_type = cpu_to_dump16(s, ET_CORE);
    elf_header->e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header->e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header->e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header->e_phoff = cpu_to_dump64(s, s->phdr_offset);
    elf_header->e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header->e_phnum = cpu_to_dump16(s, phnum);

    if (s->shdr_num) {
        elf_header->e_shoff = cpu_to_dump64(s, s->shdr_offset);
        elf_header->e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header->e_shnum = cpu_to_dump16(s, s->shdr_num);
    }
}
static void prepare_elf32_header(DumpState *s, Elf32_Ehdr *elf_header)
{
    /*
     * phnum in the elf header is 16 bit, if we have more segments we
     * set phnum to PN_XNUM and write the real number of segments to a
     * special section.
     */
    uint16_t phnum = MIN(s->phdr_num, PN_XNUM);

    memset(elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(elf_header, ELFMAG, SELFMAG);
    elf_header->e_ident[EI_CLASS] = ELFCLASS32;
    elf_header->e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header->e_ident[EI_VERSION] = EV_CURRENT;
    elf_header->e_type = cpu_to_dump16(s, ET_CORE);
    elf_header->e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header->e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header->e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header->e_phoff = cpu_to_dump32(s, s->phdr_offset);
    elf_header->e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header->e_phnum = cpu_to_dump16(s, phnum);

    if (s->shdr_num) {
        elf_header->e_shoff = cpu_to_dump32(s, s->shdr_offset);
        elf_header->e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header->e_shnum = cpu_to_dump16(s, s->shdr_num);
    }
}
static void write_elf_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf32_header;
    Elf64_Ehdr elf64_header;
    size_t header_size;
    void *header_ptr;
    int ret;

    if (dump_is_64bit(s)) {
        prepare_elf64_header(s, &elf64_header);
        header_size = sizeof(elf64_header);
        header_ptr = &elf64_header;
    } else {
        prepare_elf32_header(s, &elf32_header);
        header_size = sizeof(elf32_header);
        header_ptr = &elf32_header;
    }

    ret = fd_write_vmcore(header_ptr, header_size, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}
static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}
static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr =
        cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}
static void prepare_elf64_phdr_note(DumpState *s, Elf64_Phdr *phdr)
{
    memset(phdr, 0, sizeof(*phdr));
    phdr->p_type = cpu_to_dump32(s, PT_NOTE);
    phdr->p_offset = cpu_to_dump64(s, s->note_offset);
    phdr->p_paddr = 0;
    phdr->p_filesz = cpu_to_dump64(s, s->note_size);
    phdr->p_memsz = cpu_to_dump64(s, s->note_size);
}
static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}
static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
                             Error **errp)
{
    int ret;

    if (s->guest_note) {
        ret = f(s->guest_note, s->guest_note_size, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write guest note");
        }
    }
}
static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}
static void prepare_elf32_phdr_note(DumpState *s, Elf32_Phdr *phdr)
{
    memset(phdr, 0, sizeof(*phdr));
    phdr->p_type = cpu_to_dump32(s, PT_NOTE);
    phdr->p_offset = cpu_to_dump32(s, s->note_offset);
    phdr->p_paddr = 0;
    phdr->p_filesz = cpu_to_dump32(s, s->note_size);
    phdr->p_memsz = cpu_to_dump32(s, s->note_size);
}
static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}
static void write_elf_phdr_note(DumpState *s, Error **errp)
{
    Elf32_Phdr phdr32;
    Elf64_Phdr phdr64;
    void *phdr;
    size_t size;
    int ret;

    if (dump_is_64bit(s)) {
        prepare_elf64_phdr_note(s, &phdr64);
        size = sizeof(phdr64);
        phdr = &phdr64;
    } else {
        prepare_elf32_phdr_note(s, &phdr32);
        size = sizeof(phdr32);
        phdr = &phdr32;
    }

    ret = fd_write_vmcore(phdr, size, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}
static void write_elf_section(DumpState *s, int type, Error **errp)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->phdr_num);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->phdr_num);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write section header table");
    }
}
static void write_data(DumpState *s, void *buf, int length, Error **errp)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to save memory");
    } else {
        s->written_size += length;
    }
}
/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    ERRP_GUARD();
    int64_t i;

    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, errp);
        if (*errp) {
            return;
        }
    }

    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, errp);
        if (*errp) {
            return;
        }
    }
}
/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (dump_has_filter(s)) {
        if (phys_addr < s->filter_area_begin ||
            phys_addr >= s->filter_area_begin + s->filter_area_length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (dump_has_filter(s)) {
            if (block->target_start >= s->filter_area_begin + s->filter_area_length ||
                block->target_end <= s->filter_area_begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->filter_area_begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->filter_area_begin;
            }

            size_in_block = block->target_end - start;
            if (s->filter_area_begin + s->filter_area_length < block->target_end) {
                size_in_block -= block->target_end -
                    (s->filter_area_begin + s->filter_area_length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}
static void write_elf_phdr_loads(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (dump_is_64bit(s)) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, errp);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, errp);
        }

        if (*errp) {
            return;
        }

        if (phdr_index >= s->phdr_num) {
            break;
        }
    }
}
static void write_elf_notes(DumpState *s, Error **errp)
{
    if (dump_is_64bit(s)) {
        write_elf64_notes(fd_write_vmcore, s, errp);
    } else {
        write_elf32_notes(fd_write_vmcore, s, errp);
    }
}
/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    ERRP_GUARD();

    /*
     * the vmcore's format is:
     *   elf header | PT_NOTE | PT_LOAD ... PT_LOAD | sec_hdr | elf note | memory
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    write_elf_header(s, errp);
    if (*errp) {
        return;
    }

    /* write PT_NOTE to vmcore */
    write_elf_phdr_note(s, errp);
    if (*errp) {
        return;
    }

    /* write all PT_LOADs to vmcore */
    write_elf_phdr_loads(s, errp);
    if (*errp) {
        return;
    }

    /* write section to vmcore */
    if (s->shdr_num) {
        write_elf_section(s, 1, errp);
        if (*errp) {
            return;
        }
    }

    /* write notes to vmcore */
    write_elf_notes(s, errp);
}
static int64_t dump_filtered_memblock_size(GuestPhysBlock *block,
                                           int64_t filter_area_start,
                                           int64_t filter_area_length)
{
    int64_t size, left, right;

    /* No filter, return full size */
    if (!filter_area_length) {
        return block->target_end - block->target_start;
    }

    /* calculate the overlapped region. */
    left = MAX(filter_area_start, block->target_start);
    right = MIN(filter_area_start + filter_area_length, block->target_end);
    size = right - left;
    size = size > 0 ? size : 0;

    return size;
}
static int64_t dump_filtered_memblock_start(GuestPhysBlock *block,
                                            int64_t filter_area_start,
                                            int64_t filter_area_length)
{
    if (filter_area_length) {
        /* return -1 if the block is not within filter area */
        if (block->target_start >= filter_area_start + filter_area_length ||
            block->target_end <= filter_area_start) {
            return -1;
        }

        if (filter_area_start > block->target_start) {
            return filter_area_start - block->target_start;
        }
    }

    return 0;
}
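/*
 * Illustrative example (added for clarity, not in the original): for a block
 * covering [0x100000, 0x300000) and a filter starting at 0x200000 with
 * length 0x200000, dump_filtered_memblock_start() returns 0x100000 (the
 * offset into the block where dumping starts) and
 * dump_filtered_memblock_size() returns 0x100000 (the overlapping bytes).
 */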
/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    GuestPhysBlock *block;
    int64_t memblock_size, memblock_start;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        memblock_start = dump_filtered_memblock_start(block, s->filter_area_begin, s->filter_area_length);
        if (memblock_start == -1) {
            continue;
        }

        memblock_size = dump_filtered_memblock_size(block, s->filter_area_begin, s->filter_area_length);

        /* Write the memory to file */
        write_memory(s, block, memblock_start, memblock_size, errp);
        if (*errp) {
            return;
        }
    }
}
static void create_vmcore(DumpState *s, Error **errp)
{
    ERRP_GUARD();

    dump_begin(s, errp);
    if (*errp) {
        return;
    }

    dump_iterate(s, errp);
}
static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    size_t written_size;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}
static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;
    size_t written_size;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}
static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}
static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* note_buf is not enough */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}
/*
 * This function retrieves various sizes from an ELF note.
 *
 * @note has to be a valid ELF note. The return sizes are unmodified
 * (not padded or rounded up to be multiple of 4).
 */
static void get_note_sizes(DumpState *s, const void *note,
                           uint64_t *note_head_size,
                           uint64_t *name_size,
                           uint64_t *desc_size)
{
    uint64_t note_head_sz;
    uint64_t name_sz;
    uint64_t desc_sz;

    if (dump_is_64bit(s)) {
        const Elf64_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf64_Nhdr);
        name_sz = tswap64(hdr->n_namesz);
        desc_sz = tswap64(hdr->n_descsz);
    } else {
        const Elf32_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf32_Nhdr);
        name_sz = tswap32(hdr->n_namesz);
        desc_sz = tswap32(hdr->n_descsz);
    }

    if (note_head_size) {
        *note_head_size = note_head_sz;
    }
    if (name_size) {
        *name_size = name_sz;
    }
    if (desc_size) {
        *desc_size = desc_sz;
    }
}
static bool note_name_equal(DumpState *s,
                            const uint8_t *note, const char *name)
{
    int len = strlen(name) + 1;
    uint64_t head_size, name_size;

    get_note_sizes(s, note, &head_size, &name_size, NULL);
    head_size = ROUND_UP(head_size, 4);

    return name_size == len && memcmp(note + head_size, name, len) == 0;
}
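/*
 * Illustrative note layout this helper matches against (assuming the usual
 * "VMCOREINFO" guest note): a 12-byte note header (both Elf32_Nhdr and
 * Elf64_Nhdr, already a multiple of 4) is followed by the name
 * "VMCOREINFO\0" (n_namesz == 11), then by the 4-byte aligned descriptor
 * holding the newline-separated "KEY=value" lines.
 */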
/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;

    /* write common header, using version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, errp);
    if (*errp) {
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}
/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;

    /* write common header, using version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, errp);
    if (*errp) {
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}
static void write_dump_header(DumpState *s, Error **errp)
{
    if (dump_is_64bit(s)) {
        create_header64(s, errp);
    } else {
        create_header32(s, errp);
    }
}
static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}
/*
 * set dump_bitmap sequentially. the bit before last_pfn is not allowed to be
 * rewritten, so if you need to set the first bit, set last_pfn and pfn to 0.
 * set_dump_bitmap will always leave the recently set bit un-synced. Setting
 * (last bit + sizeof(buf) * 8) to 0 will flush the content of buf into the
 * vmcore, i.e. it synchronizes the un-synced bit into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit needed to be set is not cached in buf, flush the data in buf
     * to vmcore first.
     * making new_offset bigger than old_offset can also sync remaining data
     * into vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}
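/*
 * Worked example (illustrative): with a 4 KiB bitmap buffer, bits_per_buf is
 * 32768. Setting pfn 40000 after last_pfn 100 first flushes the buffer that
 * covers pfns 0..32767 to both bitmaps, clears it, and then sets bit
 * 40000 % 32768 = 7232 in the fresh buffer, i.e. byte 904, bit 0.
 */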
static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}
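/*
 * Illustrative example: with a 4 KiB target page size the shift is 12, so
 * (assuming ARCH_PFN_OFFSET is 0) physical address 0x12345000 corresponds to
 * pfn 0x12345, and dump_pfn_to_paddr() maps it back.
 */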
/*
 * Examine every page and return the page frame number and the address of the
 * page. bufptr can be NULL. note: the blocks here are supposed to reflect
 * guest-phys blocks, so block->target_start and block->target_end should be
 * integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        addr = block->target_start;
    } else {
        addr = dump_pfn_to_paddr(s, *pfnptr + 1);
    }
    assert(block != NULL);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        addr = block->target_start;
        buf = block->host_addr;
    }

    assert((block->target_start & ~target_page_mask) == 0);
    assert((block->target_end & ~target_page_mask) == 0);
    *pfnptr = dump_paddr_to_pfn(s, addr);
    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}
static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * examine memory page by page, and set the bit in dump_bitmap
     * corresponding to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to set dump_bitmap");
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-synced. Here we
     * set the remaining bits from last_pfn to the end of the bitmap buffer to
     * 0. With those set, the un-synced bit will be synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}
static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}
*dc
, const void *buf
, size_t size
,
1231 * dc->buf_size should not be less than size, otherwise dc will never be
1234 assert(size
<= dc
->buf_size
);
1237 * if flag_sync is set, synchronize data in dc->buf into vmcore.
1238 * otherwise check if the space is enough for caching data in buf, if not,
1239 * write the data in dc->buf to dc->fd and reset dc->buf
1241 if ((!flag_sync
&& dc
->data_size
+ size
> dc
->buf_size
) ||
1242 (flag_sync
&& dc
->data_size
> 0)) {
1243 if (write_buffer(dc
->fd
, dc
->offset
, dc
->buf
, dc
->data_size
) < 0) {
1247 dc
->offset
+= dc
->data_size
;
1252 memcpy(dc
->buf
+ dc
->data_size
, buf
, size
);
1253 dc
->data_size
+= size
;
1259 static void free_data_cache(DataCache
*data_cache
)
1261 g_free(data_cache
->buf
);
static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}
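/*
 * Worked example for the LZO bound above (illustrative): a 4096-byte page
 * needs at most 4096 + 4096 / 16 + 64 + 3 = 4419 bytes of output buffer.
 */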
static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write page data (zero page)");
        goto out;
    }

    offset_data += s->dump_info.page_size;

    /*
     * dump memory to vmcore page by page. zero pages all reside in the
     * first page of the page section
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (buffer_is_zero(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        } else {
            /*
             * not a zero page, then:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get page desc of the compressed page and write it into the
             *    cache of page_desc
             *
             * only one compression format will be used here, because
             * s->flag_compress is set. But when compression fails to work,
             * we fall back to saving in plaintext.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf,
                           s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                       (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                                         (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                       (snappy_compress((char *)buf, s->dump_info.page_size,
                                        (char *)buf_out, &size_out) == SNAPPY_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to saving in plaintext, size_out should be
                 * assigned the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        }
        s->written_size += s->dump_info.page_size;
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_desc");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_data");
    }

out:
#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif

    g_free(buf_out);
    free_data_cache(&page_desc);
    free_data_cache(&page_data);
}
*s
, Error
**errp
)
1461 * the kdump-compressed format is:
1463 * +------------------------------------------+ 0x0
1464 * | main header (struct disk_dump_header) |
1465 * |------------------------------------------+ block 1
1466 * | sub header (struct kdump_sub_header) |
1467 * |------------------------------------------+ block 2
1468 * | 1st-dump_bitmap |
1469 * |------------------------------------------+ block 2 + X blocks
1470 * | 2nd-dump_bitmap | (aligned by block)
1471 * |------------------------------------------+ block 2 + 2 * X blocks
1472 * | page desc for pfn 0 (struct page_desc) | (aligned by block)
1473 * | page desc for pfn 1 (struct page_desc) |
1475 * |------------------------------------------| (not aligned by block)
1476 * | page data (pfn 0) |
1477 * | page data (pfn 1) |
1479 * +------------------------------------------+
1482 ret
= write_start_flat_header(s
->fd
);
1484 error_setg(errp
, "dump: failed to write start flat header");
1488 write_dump_header(s
, errp
);
1493 write_dump_bitmap(s
, errp
);
1498 write_dump_pages(s
, errp
);
1503 ret
= write_end_flat_header(s
->fd
);
1505 error_setg(errp
, "dump: failed to write end flat header");
1510 static int validate_start_block(DumpState
*s
)
1512 GuestPhysBlock
*block
;
1514 if (!dump_has_filter(s
)) {
1518 QTAILQ_FOREACH(block
, &s
->guest_phys_blocks
.head
, next
) {
1519 /* This block is out of the range */
1520 if (block
->target_start
>= s
->filter_area_begin
+ s
->filter_area_length
||
1521 block
->target_end
<= s
->filter_area_begin
) {
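/*
 * Added note: validate_start_block() returns 0 as soon as at least one
 * guest-phys block overlaps the requested filter window, and -1 when the
 * filter excludes every block, which callers treat as an invalid "begin".
 */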
static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head);
    s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
}
static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };
static void dump_state_prepare(DumpState *s)
{
    /* zero the struct, setting status to active */
    *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
}
bool qemu_system_dump_in_progress(void)
{
    DumpState *state = &dump_state_global;

    return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE);
}
/*
 * calculate the total size of memory to be dumped (taking the filter into
 * account)
 */
static int64_t dump_calculate_size(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t total = 0;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        total += dump_filtered_memblock_size(block,
                                             s->filter_area_begin,
                                             s->filter_area_length);
    }

    return total;
}
static void vmcoreinfo_update_phys_base(DumpState *s)
{
    uint64_t size, note_head_size, name_size, phys_base;
    char **lines;
    uint8_t *vmci;
    size_t i;

    if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        return;
    }

    get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
    note_head_size = ROUND_UP(note_head_size, 4);

    vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
    *(vmci + size) = '\0';

    lines = g_strsplit((char *)vmci, "\n", -1);
    for (i = 0; lines[i]; i++) {
        const char *prefix = NULL;

        if (s->dump_info.d_machine == EM_X86_64) {
            prefix = "NUMBER(phys_base)=";
        } else if (s->dump_info.d_machine == EM_AARCH64) {
            prefix = "NUMBER(PHYS_OFFSET)=";
        }

        if (prefix && g_str_has_prefix(lines[i], prefix)) {
            if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16,
                              &phys_base) < 0) {
                warn_report("Failed to read %s", prefix);
            } else {
                s->dump_info.phys_base = phys_base;
            }
            break;
        }
    }

    g_strfreev(lines);
}
static void dump_init(DumpState *s, int fd, bool has_format,
                      DumpGuestMemoryFormat format, bool paging, bool has_filter,
                      int64_t begin, int64_t length, Error **errp)
{
    ERRP_GUARD();
    VMCoreInfoState *vmci = vmcoreinfo_find();
    CPUState *cpu;
    int nr_cpus;
    int ret;

    s->has_format = has_format;
    s->format = format;
    s->written_size = 0;

    /* kdump-compressed conflicts with paging and filter */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    if (has_filter && !length) {
        error_setg(errp, QERR_INVALID_PARAMETER, "length");
        goto cleanup;
    }
    s->filter_area_begin = begin;
    s->filter_area_length = length;

    memory_mapping_list_init(&s->list);

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);
    s->total_size = dump_calculate_size(s);
#ifdef DEBUG_DUMP_GUEST_MEMORY
    fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
#endif

    /* it does not make sense to dump non-existent memory */
    if (!s->total_size) {
        error_setg(errp, "dump: no guest memory to dump");
        goto cleanup;
    }

    /* Is the filter filtering everything? */
    if (validate_start_block(s) == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    if (!s->dump_info.page_size) {
        s->dump_info.page_size = TARGET_PAGE_SIZE;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /*
     * The goal of this block is to (a) update the previously guessed
     * phys_base, (b) copy the guest note out of the guest.
     * Failure to do so is not fatal for dumping.
     */
    if (vmci) {
        uint64_t addr, note_head_size, name_size, desc_size;
        uint32_t size;
        uint16_t format;

        note_head_size = dump_is_64bit(s) ?
            sizeof(Elf64_Nhdr) : sizeof(Elf32_Nhdr);

        format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
        size = le32_to_cpu(vmci->vmcoreinfo.size);
        addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
        if (!vmci->has_vmcoreinfo) {
            warn_report("guest note is not present");
        } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
            warn_report("guest note size is invalid: %" PRIu32, size);
        } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) {
            warn_report("guest note format is unsupported: %" PRIu16, format);
        } else {
            s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
            cpu_physical_memory_read(addr, s->guest_note, size);

            get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
            s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
                                               desc_size);
            if (name_size > MAX_GUEST_NOTE_SIZE ||
                desc_size > MAX_GUEST_NOTE_SIZE ||
                s->guest_note_size > size) {
                warn_report("Invalid guest note header");
                g_free(s->guest_note);
                s->guest_note = NULL;
            } else {
                vmcoreinfo_update_phys_base(s);
                s->note_size += s->guest_note_size;
            }
        }
    }

    /* get memory mapping */
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, errp);
        if (*errp) {
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);

    uint64_t tmp;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
                       s->dump_info.page_size);
    s->len_dump_bitmap = tmp * s->dump_info.page_size;

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return;
    }

    if (dump_has_filter(s)) {
        memory_mapping_filter(&s->list, s->filter_area_begin, s->filter_area_length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->shdr_num = 0;
        s->phdr_num += s->list.num;
    } else {
        /* sh_info of section 0 holds the real number of phdrs */
        s->shdr_num = 1;

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->phdr_num += s->list.num;
        } else {
            s->phdr_num = UINT32_MAX;
        }
    }

    if (dump_is_64bit(s)) {
        s->phdr_offset = sizeof(Elf64_Ehdr);
        s->shdr_offset = s->phdr_offset + sizeof(Elf64_Phdr) * s->phdr_num;
        s->note_offset = s->shdr_offset + sizeof(Elf64_Shdr) * s->shdr_num;
        s->memory_offset = s->note_offset + s->note_size;
    } else {
        s->phdr_offset = sizeof(Elf32_Ehdr);
        s->shdr_offset = s->phdr_offset + sizeof(Elf32_Phdr) * s->phdr_num;
        s->note_offset = s->shdr_offset + sizeof(Elf32_Shdr) * s->shdr_num;
        s->memory_offset = s->note_offset + s->note_size;
    }

    return;

cleanup:
    dump_cleanup(s);
}
/* this operation might be time consuming. */
static void dump_process(DumpState *s, Error **errp)
{
    ERRP_GUARD();
    DumpQueryResult *result = NULL;

    if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
#ifdef TARGET_X86_64
        create_win_dump(s, errp);
#endif
    } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        create_kdump_vmcore(s, errp);
    } else {
        create_vmcore(s, errp);
    }

    /* make sure status is written after written_size updates */
    smp_wmb();
    qatomic_set(&s->status,
                (*errp ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));

    /* send DUMP_COMPLETED message (unconditionally) */
    result = qmp_query_dump(NULL);
    /* should never fail */
    assert(result);
    qapi_event_send_dump_completed(result, !!*errp, (*errp ?
                                   error_get_pretty(*errp) : NULL));
    qapi_free_DumpQueryResult(result);

    dump_cleanup(s);
}
static void *dump_thread(void *data)
{
    DumpState *s = (DumpState *)data;
    dump_process(s, NULL);
    return NULL;
}
DumpQueryResult *qmp_query_dump(Error **errp)
{
    DumpQueryResult *result = g_new(DumpQueryResult, 1);
    DumpState *state = &dump_state_global;

    result->status = qatomic_read(&state->status);
    /* make sure we are reading status and written_size in order */
    smp_rmb();
    result->completed = state->written_size;
    result->total = state->total_size;

    return result;
}
void qmp_dump_guest_memory(bool paging, const char *file,
                           bool has_detach, bool detach,
                           bool has_begin, int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    ERRP_GUARD();
    const char *p;
    int fd = -1;
    DumpState *s;
    bool detach_p = false;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Dump not allowed during incoming migration.");
        return;
    }

    /* if there is a dump in background, we should wait until the dump
     * finished */
    if (qemu_system_dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    /*
     * kdump-compressed format needs the whole memory dumped, so paging or
     * filter is not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }
    if (has_detach) {
        detach_p = detach;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

#ifndef TARGET_X86_64
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
        error_setg(errp, "Windows dump is only available for x86-64");
        return;
    }
#endif

    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(monitor_cur(), p, errp);
        if (fd == -1) {
            return;
        }
    }

    if (strstart(file, "file:", &p)) {
        fd = qemu_open_old(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    if (!dump_migration_blocker) {
        error_setg(&dump_migration_blocker,
                   "Live migration disabled: dump-guest-memory in progress");
    }

    /*
     * Allows even for -only-migratable, but forbid migration during the
     * process of dump guest memory.
     */
    if (migrate_add_blocker_internal(dump_migration_blocker, errp)) {
        /* Remember to release the fd before passing it over to dump state */
        close(fd);
        return;
    }

    s = &dump_state_global;
    dump_state_prepare(s);

    dump_init(s, fd, has_format, format, paging, has_begin,
              begin, length, errp);
    if (*errp) {
        qatomic_set(&s->status, DUMP_STATUS_FAILED);
        return;
    }

    if (detach_p) {
        /* detached dump */
        s->detached = true;
        qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
                           s, QEMU_THREAD_DETACHED);
    } else {
        /* sync dump */
        dump_process(s, errp);
    }
}
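/*
 * Illustrative QMP usage of the command implemented above (not part of this
 * file):
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore",
 *                    "detach": true, "format": "kdump-zlib" } }
 * With "detach" the command returns immediately and progress can be polled
 * with "query-dump".
 */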
DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryCapability *cap =
                                  g_new0(DumpGuestMemoryCapability, 1);
    DumpGuestMemoryFormatList **tail = &cap->formats;

    /* elf is always available */
    QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_ELF);

    /* kdump-zlib is always available */
    QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB);

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO);
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY);
#endif

    /* Windows dump is available only if target is x86_64 */
#ifdef TARGET_X86_64
    QAPI_LIST_APPEND(tail, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP);
#endif

    return cap;
}