4 * Copyright Fujitsu, Corp. 2011, 2012
7 * Wen Congyang <wency@cn.fujitsu.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
15 #include "qemu/cutils.h"
17 #include "qemu/bswap.h"
18 #include "exec/target_page.h"
19 #include "monitor/monitor.h"
20 #include "sysemu/dump.h"
21 #include "sysemu/runstate.h"
22 #include "sysemu/cpus.h"
23 #include "qapi/error.h"
24 #include "qapi/qapi-commands-dump.h"
25 #include "qapi/qapi-events-dump.h"
26 #include "qapi/qmp/qerror.h"
27 #include "qemu/error-report.h"
28 #include "qemu/main-loop.h"
29 #include "hw/misc/vmcoreinfo.h"
30 #include "migration/blocker.h"
31 #include "hw/core/cpu.h"
36 #include <lzo/lzo1x.h>
41 #ifndef ELF_MACHINE_UNAME
42 #define ELF_MACHINE_UNAME "Unknown"
45 #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */
47 static Error
*dump_migration_blocker
;
49 #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \
50 ((DIV_ROUND_UP((hdr_size), 4) + \
51 DIV_ROUND_UP((name_size), 4) + \
52 DIV_ROUND_UP((desc_size), 4)) * 4)
54 static inline bool dump_is_64bit(DumpState
*s
)
56 return s
->dump_info
.d_class
== ELFCLASS64
;
59 static inline bool dump_has_filter(DumpState
*s
)
61 return s
->filter_area_length
> 0;
64 uint16_t cpu_to_dump16(DumpState
*s
, uint16_t val
)
66 if (s
->dump_info
.d_endian
== ELFDATA2LSB
) {
67 val
= cpu_to_le16(val
);
69 val
= cpu_to_be16(val
);
75 uint32_t cpu_to_dump32(DumpState
*s
, uint32_t val
)
77 if (s
->dump_info
.d_endian
== ELFDATA2LSB
) {
78 val
= cpu_to_le32(val
);
80 val
= cpu_to_be32(val
);
86 uint64_t cpu_to_dump64(DumpState
*s
, uint64_t val
)
88 if (s
->dump_info
.d_endian
== ELFDATA2LSB
) {
89 val
= cpu_to_le64(val
);
91 val
= cpu_to_be64(val
);
97 static int dump_cleanup(DumpState
*s
)
99 if (s
->dump_info
.arch_cleanup_fn
) {
100 s
->dump_info
.arch_cleanup_fn(s
);
103 guest_phys_blocks_free(&s
->guest_phys_blocks
);
104 memory_mapping_list_free(&s
->list
);
106 g_free(s
->guest_note
);
107 g_clear_pointer(&s
->string_table_buf
, g_array_unref
);
108 s
->guest_note
= NULL
;
111 qemu_mutex_lock_iothread();
115 qemu_mutex_unlock_iothread();
118 migrate_del_blocker(&dump_migration_blocker
);
123 static int fd_write_vmcore(const void *buf
, size_t size
, void *opaque
)
125 DumpState
*s
= opaque
;
128 written_size
= qemu_write_full(s
->fd
, buf
, size
);
129 if (written_size
!= size
) {
136 static void prepare_elf64_header(DumpState
*s
, Elf64_Ehdr
*elf_header
)
139 * phnum in the elf header is 16 bit, if we have more segments we
140 * set phnum to PN_XNUM and write the real number of segments to a
143 uint16_t phnum
= MIN(s
->phdr_num
, PN_XNUM
);
145 memset(elf_header
, 0, sizeof(Elf64_Ehdr
));
146 memcpy(elf_header
, ELFMAG
, SELFMAG
);
147 elf_header
->e_ident
[EI_CLASS
] = ELFCLASS64
;
148 elf_header
->e_ident
[EI_DATA
] = s
->dump_info
.d_endian
;
149 elf_header
->e_ident
[EI_VERSION
] = EV_CURRENT
;
150 elf_header
->e_type
= cpu_to_dump16(s
, ET_CORE
);
151 elf_header
->e_machine
= cpu_to_dump16(s
, s
->dump_info
.d_machine
);
152 elf_header
->e_version
= cpu_to_dump32(s
, EV_CURRENT
);
153 elf_header
->e_ehsize
= cpu_to_dump16(s
, sizeof(elf_header
));
154 elf_header
->e_phoff
= cpu_to_dump64(s
, s
->phdr_offset
);
155 elf_header
->e_phentsize
= cpu_to_dump16(s
, sizeof(Elf64_Phdr
));
156 elf_header
->e_phnum
= cpu_to_dump16(s
, phnum
);
157 elf_header
->e_shoff
= cpu_to_dump64(s
, s
->shdr_offset
);
158 elf_header
->e_shentsize
= cpu_to_dump16(s
, sizeof(Elf64_Shdr
));
159 elf_header
->e_shnum
= cpu_to_dump16(s
, s
->shdr_num
);
160 elf_header
->e_shstrndx
= cpu_to_dump16(s
, s
->shdr_num
- 1);
163 static void prepare_elf32_header(DumpState
*s
, Elf32_Ehdr
*elf_header
)
166 * phnum in the elf header is 16 bit, if we have more segments we
167 * set phnum to PN_XNUM and write the real number of segments to a
170 uint16_t phnum
= MIN(s
->phdr_num
, PN_XNUM
);
172 memset(elf_header
, 0, sizeof(Elf32_Ehdr
));
173 memcpy(elf_header
, ELFMAG
, SELFMAG
);
174 elf_header
->e_ident
[EI_CLASS
] = ELFCLASS32
;
175 elf_header
->e_ident
[EI_DATA
] = s
->dump_info
.d_endian
;
176 elf_header
->e_ident
[EI_VERSION
] = EV_CURRENT
;
177 elf_header
->e_type
= cpu_to_dump16(s
, ET_CORE
);
178 elf_header
->e_machine
= cpu_to_dump16(s
, s
->dump_info
.d_machine
);
179 elf_header
->e_version
= cpu_to_dump32(s
, EV_CURRENT
);
180 elf_header
->e_ehsize
= cpu_to_dump16(s
, sizeof(elf_header
));
181 elf_header
->e_phoff
= cpu_to_dump32(s
, s
->phdr_offset
);
182 elf_header
->e_phentsize
= cpu_to_dump16(s
, sizeof(Elf32_Phdr
));
183 elf_header
->e_phnum
= cpu_to_dump16(s
, phnum
);
184 elf_header
->e_shoff
= cpu_to_dump32(s
, s
->shdr_offset
);
185 elf_header
->e_shentsize
= cpu_to_dump16(s
, sizeof(Elf32_Shdr
));
186 elf_header
->e_shnum
= cpu_to_dump16(s
, s
->shdr_num
);
187 elf_header
->e_shstrndx
= cpu_to_dump16(s
, s
->shdr_num
- 1);
190 static void write_elf_header(DumpState
*s
, Error
**errp
)
192 Elf32_Ehdr elf32_header
;
193 Elf64_Ehdr elf64_header
;
198 /* The NULL header and the shstrtab are always defined */
199 assert(s
->shdr_num
>= 2);
200 if (dump_is_64bit(s
)) {
201 prepare_elf64_header(s
, &elf64_header
);
202 header_size
= sizeof(elf64_header
);
203 header_ptr
= &elf64_header
;
205 prepare_elf32_header(s
, &elf32_header
);
206 header_size
= sizeof(elf32_header
);
207 header_ptr
= &elf32_header
;
210 ret
= fd_write_vmcore(header_ptr
, header_size
, s
);
212 error_setg_errno(errp
, -ret
, "dump: failed to write elf header");
216 static void write_elf64_load(DumpState
*s
, MemoryMapping
*memory_mapping
,
217 int phdr_index
, hwaddr offset
,
218 hwaddr filesz
, Error
**errp
)
223 memset(&phdr
, 0, sizeof(Elf64_Phdr
));
224 phdr
.p_type
= cpu_to_dump32(s
, PT_LOAD
);
225 phdr
.p_offset
= cpu_to_dump64(s
, offset
);
226 phdr
.p_paddr
= cpu_to_dump64(s
, memory_mapping
->phys_addr
);
227 phdr
.p_filesz
= cpu_to_dump64(s
, filesz
);
228 phdr
.p_memsz
= cpu_to_dump64(s
, memory_mapping
->length
);
229 phdr
.p_vaddr
= cpu_to_dump64(s
, memory_mapping
->virt_addr
) ?: phdr
.p_paddr
;
231 assert(memory_mapping
->length
>= filesz
);
233 ret
= fd_write_vmcore(&phdr
, sizeof(Elf64_Phdr
), s
);
235 error_setg_errno(errp
, -ret
,
236 "dump: failed to write program header table");
240 static void write_elf32_load(DumpState
*s
, MemoryMapping
*memory_mapping
,
241 int phdr_index
, hwaddr offset
,
242 hwaddr filesz
, Error
**errp
)
247 memset(&phdr
, 0, sizeof(Elf32_Phdr
));
248 phdr
.p_type
= cpu_to_dump32(s
, PT_LOAD
);
249 phdr
.p_offset
= cpu_to_dump32(s
, offset
);
250 phdr
.p_paddr
= cpu_to_dump32(s
, memory_mapping
->phys_addr
);
251 phdr
.p_filesz
= cpu_to_dump32(s
, filesz
);
252 phdr
.p_memsz
= cpu_to_dump32(s
, memory_mapping
->length
);
254 cpu_to_dump32(s
, memory_mapping
->virt_addr
) ?: phdr
.p_paddr
;
256 assert(memory_mapping
->length
>= filesz
);
258 ret
= fd_write_vmcore(&phdr
, sizeof(Elf32_Phdr
), s
);
260 error_setg_errno(errp
, -ret
,
261 "dump: failed to write program header table");
265 static void prepare_elf64_phdr_note(DumpState
*s
, Elf64_Phdr
*phdr
)
267 memset(phdr
, 0, sizeof(*phdr
));
268 phdr
->p_type
= cpu_to_dump32(s
, PT_NOTE
);
269 phdr
->p_offset
= cpu_to_dump64(s
, s
->note_offset
);
271 phdr
->p_filesz
= cpu_to_dump64(s
, s
->note_size
);
272 phdr
->p_memsz
= cpu_to_dump64(s
, s
->note_size
);
276 static inline int cpu_index(CPUState
*cpu
)
278 return cpu
->cpu_index
+ 1;
281 static void write_guest_note(WriteCoreDumpFunction f
, DumpState
*s
,
287 ret
= f(s
->guest_note
, s
->guest_note_size
, s
);
289 error_setg(errp
, "dump: failed to write guest note");
294 static void write_elf64_notes(WriteCoreDumpFunction f
, DumpState
*s
,
303 ret
= cpu_write_elf64_note(f
, cpu
, id
, s
);
305 error_setg(errp
, "dump: failed to write elf notes");
311 ret
= cpu_write_elf64_qemunote(f
, cpu
, s
);
313 error_setg(errp
, "dump: failed to write CPU status");
318 write_guest_note(f
, s
, errp
);
321 static void prepare_elf32_phdr_note(DumpState
*s
, Elf32_Phdr
*phdr
)
323 memset(phdr
, 0, sizeof(*phdr
));
324 phdr
->p_type
= cpu_to_dump32(s
, PT_NOTE
);
325 phdr
->p_offset
= cpu_to_dump32(s
, s
->note_offset
);
327 phdr
->p_filesz
= cpu_to_dump32(s
, s
->note_size
);
328 phdr
->p_memsz
= cpu_to_dump32(s
, s
->note_size
);
332 static void write_elf32_notes(WriteCoreDumpFunction f
, DumpState
*s
,
341 ret
= cpu_write_elf32_note(f
, cpu
, id
, s
);
343 error_setg(errp
, "dump: failed to write elf notes");
349 ret
= cpu_write_elf32_qemunote(f
, cpu
, s
);
351 error_setg(errp
, "dump: failed to write CPU status");
356 write_guest_note(f
, s
, errp
);
359 static void write_elf_phdr_note(DumpState
*s
, Error
**errp
)
367 if (dump_is_64bit(s
)) {
368 prepare_elf64_phdr_note(s
, &phdr64
);
369 size
= sizeof(phdr64
);
372 prepare_elf32_phdr_note(s
, &phdr32
);
373 size
= sizeof(phdr32
);
377 ret
= fd_write_vmcore(phdr
, size
, s
);
379 error_setg_errno(errp
, -ret
,
380 "dump: failed to write program header table");
384 static void prepare_elf_section_hdr_zero(DumpState
*s
)
386 if (dump_is_64bit(s
)) {
387 Elf64_Shdr
*shdr64
= s
->elf_section_hdrs
;
389 shdr64
->sh_info
= cpu_to_dump32(s
, s
->phdr_num
);
391 Elf32_Shdr
*shdr32
= s
->elf_section_hdrs
;
393 shdr32
->sh_info
= cpu_to_dump32(s
, s
->phdr_num
);
397 static void prepare_elf_section_hdr_string(DumpState
*s
, void *buff
)
399 uint64_t index
= s
->string_table_buf
->len
;
400 const char strtab
[] = ".shstrtab";
401 Elf32_Shdr shdr32
= {};
402 Elf64_Shdr shdr64
= {};
406 g_array_append_vals(s
->string_table_buf
, strtab
, sizeof(strtab
));
407 if (dump_is_64bit(s
)) {
408 shdr_size
= sizeof(Elf64_Shdr
);
409 shdr64
.sh_type
= SHT_STRTAB
;
410 shdr64
.sh_offset
= s
->section_offset
+ s
->elf_section_data_size
;
411 shdr64
.sh_name
= index
;
412 shdr64
.sh_size
= s
->string_table_buf
->len
;
415 shdr_size
= sizeof(Elf32_Shdr
);
416 shdr32
.sh_type
= SHT_STRTAB
;
417 shdr32
.sh_offset
= s
->section_offset
+ s
->elf_section_data_size
;
418 shdr32
.sh_name
= index
;
419 shdr32
.sh_size
= s
->string_table_buf
->len
;
422 memcpy(buff
, shdr
, shdr_size
);
425 static bool prepare_elf_section_hdrs(DumpState
*s
, Error
**errp
)
427 size_t len
, sizeof_shdr
;
433 * - Arch section hdrs
436 sizeof_shdr
= dump_is_64bit(s
) ? sizeof(Elf64_Shdr
) : sizeof(Elf32_Shdr
);
437 len
= sizeof_shdr
* s
->shdr_num
;
438 s
->elf_section_hdrs
= g_malloc0(len
);
439 buff_hdr
= s
->elf_section_hdrs
;
442 * The first section header is ALWAYS a special initial section
445 * The header should be 0 with one exception being that if
446 * phdr_num is PN_XNUM then the sh_info field contains the real
447 * number of segment entries.
449 * As we zero allocate the buffer we will only need to modify
450 * sh_info for the PN_XNUM case.
452 if (s
->phdr_num
>= PN_XNUM
) {
453 prepare_elf_section_hdr_zero(s
);
455 buff_hdr
+= sizeof_shdr
;
457 /* Add architecture defined section headers */
458 if (s
->dump_info
.arch_sections_write_hdr_fn
459 && s
->shdr_num
> 2) {
460 buff_hdr
+= s
->dump_info
.arch_sections_write_hdr_fn(s
, buff_hdr
);
462 if (s
->shdr_num
>= SHN_LORESERVE
) {
463 error_setg_errno(errp
, EINVAL
,
464 "dump: too many architecture defined sections");
470 * String table is the last section since strings are added via
471 * arch_sections_write_hdr().
473 prepare_elf_section_hdr_string(s
, buff_hdr
);
477 static void write_elf_section_headers(DumpState
*s
, Error
**errp
)
479 size_t sizeof_shdr
= dump_is_64bit(s
) ? sizeof(Elf64_Shdr
) : sizeof(Elf32_Shdr
);
482 if (!prepare_elf_section_hdrs(s
, errp
)) {
486 ret
= fd_write_vmcore(s
->elf_section_hdrs
, s
->shdr_num
* sizeof_shdr
, s
);
488 error_setg_errno(errp
, -ret
, "dump: failed to write section headers");
491 g_free(s
->elf_section_hdrs
);
494 static void write_elf_sections(DumpState
*s
, Error
**errp
)
498 if (s
->elf_section_data_size
) {
499 /* Write architecture section data */
500 ret
= fd_write_vmcore(s
->elf_section_data
,
501 s
->elf_section_data_size
, s
);
503 error_setg_errno(errp
, -ret
,
504 "dump: failed to write architecture section data");
509 /* Write string table */
510 ret
= fd_write_vmcore(s
->string_table_buf
->data
,
511 s
->string_table_buf
->len
, s
);
513 error_setg_errno(errp
, -ret
, "dump: failed to write string table data");
517 static void write_data(DumpState
*s
, void *buf
, int length
, Error
**errp
)
521 ret
= fd_write_vmcore(buf
, length
, s
);
523 error_setg_errno(errp
, -ret
, "dump: failed to save memory");
525 s
->written_size
+= length
;
529 /* write the memory to vmcore. 1 page per I/O. */
530 static void write_memory(DumpState
*s
, GuestPhysBlock
*block
, ram_addr_t start
,
531 int64_t size
, Error
**errp
)
536 for (i
= 0; i
< size
/ s
->dump_info
.page_size
; i
++) {
537 write_data(s
, block
->host_addr
+ start
+ i
* s
->dump_info
.page_size
,
538 s
->dump_info
.page_size
, errp
);
544 if ((size
% s
->dump_info
.page_size
) != 0) {
545 write_data(s
, block
->host_addr
+ start
+ i
* s
->dump_info
.page_size
,
546 size
% s
->dump_info
.page_size
, errp
);
553 /* get the memory's offset and size in the vmcore */
554 static void get_offset_range(hwaddr phys_addr
,
555 ram_addr_t mapping_length
,
560 GuestPhysBlock
*block
;
561 hwaddr offset
= s
->memory_offset
;
562 int64_t size_in_block
, start
;
564 /* When the memory is not stored into vmcore, offset will be -1 */
568 if (dump_has_filter(s
)) {
569 if (phys_addr
< s
->filter_area_begin
||
570 phys_addr
>= s
->filter_area_begin
+ s
->filter_area_length
) {
575 QTAILQ_FOREACH(block
, &s
->guest_phys_blocks
.head
, next
) {
576 if (dump_has_filter(s
)) {
577 if (block
->target_start
>= s
->filter_area_begin
+ s
->filter_area_length
||
578 block
->target_end
<= s
->filter_area_begin
) {
579 /* This block is out of the range */
583 if (s
->filter_area_begin
<= block
->target_start
) {
584 start
= block
->target_start
;
586 start
= s
->filter_area_begin
;
589 size_in_block
= block
->target_end
- start
;
590 if (s
->filter_area_begin
+ s
->filter_area_length
< block
->target_end
) {
591 size_in_block
-= block
->target_end
- (s
->filter_area_begin
+ s
->filter_area_length
);
594 start
= block
->target_start
;
595 size_in_block
= block
->target_end
- block
->target_start
;
598 if (phys_addr
>= start
&& phys_addr
< start
+ size_in_block
) {
599 *p_offset
= phys_addr
- start
+ offset
;
601 /* The offset range mapped from the vmcore file must not spill over
602 * the GuestPhysBlock, clamp it. The rest of the mapping will be
603 * zero-filled in memory at load time; see
604 * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
606 *p_filesz
= phys_addr
+ mapping_length
<= start
+ size_in_block
?
608 size_in_block
- (phys_addr
- start
);
612 offset
+= size_in_block
;
616 static void write_elf_phdr_loads(DumpState
*s
, Error
**errp
)
619 hwaddr offset
, filesz
;
620 MemoryMapping
*memory_mapping
;
621 uint32_t phdr_index
= 1;
623 QTAILQ_FOREACH(memory_mapping
, &s
->list
.head
, next
) {
624 get_offset_range(memory_mapping
->phys_addr
,
625 memory_mapping
->length
,
626 s
, &offset
, &filesz
);
627 if (dump_is_64bit(s
)) {
628 write_elf64_load(s
, memory_mapping
, phdr_index
++, offset
,
631 write_elf32_load(s
, memory_mapping
, phdr_index
++, offset
,
639 if (phdr_index
>= s
->phdr_num
) {
645 static void write_elf_notes(DumpState
*s
, Error
**errp
)
647 if (dump_is_64bit(s
)) {
648 write_elf64_notes(fd_write_vmcore
, s
, errp
);
650 write_elf32_notes(fd_write_vmcore
, s
, errp
);
654 /* write elf header, PT_NOTE and elf note to vmcore. */
655 static void dump_begin(DumpState
*s
, Error
**errp
)
660 * the vmcore's format is:
679 * we only know where the memory is saved after we write elf note into
683 /* write elf header to vmcore */
684 write_elf_header(s
, errp
);
689 /* write section headers to vmcore */
690 write_elf_section_headers(s
, errp
);
695 /* write PT_NOTE to vmcore */
696 write_elf_phdr_note(s
, errp
);
701 /* write all PT_LOADs to vmcore */
702 write_elf_phdr_loads(s
, errp
);
707 /* write notes to vmcore */
708 write_elf_notes(s
, errp
);
711 int64_t dump_filtered_memblock_size(GuestPhysBlock
*block
,
712 int64_t filter_area_start
,
713 int64_t filter_area_length
)
715 int64_t size
, left
, right
;
717 /* No filter, return full size */
718 if (!filter_area_length
) {
719 return block
->target_end
- block
->target_start
;
722 /* calculate the overlapped region. */
723 left
= MAX(filter_area_start
, block
->target_start
);
724 right
= MIN(filter_area_start
+ filter_area_length
, block
->target_end
);
726 size
= size
> 0 ? size
: 0;
731 int64_t dump_filtered_memblock_start(GuestPhysBlock
*block
,
732 int64_t filter_area_start
,
733 int64_t filter_area_length
)
735 if (filter_area_length
) {
736 /* return -1 if the block is not within filter area */
737 if (block
->target_start
>= filter_area_start
+ filter_area_length
||
738 block
->target_end
<= filter_area_start
) {
742 if (filter_area_start
> block
->target_start
) {
743 return filter_area_start
- block
->target_start
;
750 /* write all memory to vmcore */
751 static void dump_iterate(DumpState
*s
, Error
**errp
)
754 GuestPhysBlock
*block
;
755 int64_t memblock_size
, memblock_start
;
757 QTAILQ_FOREACH(block
, &s
->guest_phys_blocks
.head
, next
) {
758 memblock_start
= dump_filtered_memblock_start(block
, s
->filter_area_begin
, s
->filter_area_length
);
759 if (memblock_start
== -1) {
763 memblock_size
= dump_filtered_memblock_size(block
, s
->filter_area_begin
, s
->filter_area_length
);
765 /* Write the memory to file */
766 write_memory(s
, block
, memblock_start
, memblock_size
, errp
);
773 static void dump_end(DumpState
*s
, Error
**errp
)
777 if (s
->elf_section_data_size
) {
778 s
->elf_section_data
= g_malloc0(s
->elf_section_data_size
);
781 /* Adds the architecture defined section data to s->elf_section_data */
782 if (s
->dump_info
.arch_sections_write_fn
&&
783 s
->elf_section_data_size
) {
784 rc
= s
->dump_info
.arch_sections_write_fn(s
, s
->elf_section_data
);
786 error_setg_errno(errp
, rc
,
787 "dump: failed to get arch section data");
788 g_free(s
->elf_section_data
);
793 /* write sections to vmcore */
794 write_elf_sections(s
, errp
);
797 static void create_vmcore(DumpState
*s
, Error
**errp
)
806 /* Iterate over memory and dump it to file */
807 dump_iterate(s
, errp
);
812 /* Write the section data */
816 static int write_start_flat_header(DumpState
*s
)
818 MakedumpfileHeader
*mh
;
825 QEMU_BUILD_BUG_ON(sizeof *mh
> MAX_SIZE_MDF_HEADER
);
826 mh
= g_malloc0(MAX_SIZE_MDF_HEADER
);
828 memcpy(mh
->signature
, MAKEDUMPFILE_SIGNATURE
,
829 MIN(sizeof mh
->signature
, sizeof MAKEDUMPFILE_SIGNATURE
));
831 mh
->type
= cpu_to_be64(TYPE_FLAT_HEADER
);
832 mh
->version
= cpu_to_be64(VERSION_FLAT_HEADER
);
835 written_size
= qemu_write_full(s
->fd
, mh
, MAX_SIZE_MDF_HEADER
);
836 if (written_size
!= MAX_SIZE_MDF_HEADER
) {
844 static int write_end_flat_header(DumpState
*s
)
846 MakedumpfileDataHeader mdh
;
852 mdh
.offset
= END_FLAG_FLAT_HEADER
;
853 mdh
.buf_size
= END_FLAG_FLAT_HEADER
;
856 written_size
= qemu_write_full(s
->fd
, &mdh
, sizeof(mdh
));
857 if (written_size
!= sizeof(mdh
)) {
864 static int write_buffer(DumpState
*s
, off_t offset
, const void *buf
, size_t size
)
867 MakedumpfileDataHeader mdh
;
871 seek_loc
= lseek(s
->fd
, offset
, SEEK_SET
);
872 if (seek_loc
== (off_t
) -1) {
876 mdh
.offset
= cpu_to_be64(offset
);
877 mdh
.buf_size
= cpu_to_be64(size
);
879 written_size
= qemu_write_full(s
->fd
, &mdh
, sizeof(mdh
));
880 if (written_size
!= sizeof(mdh
)) {
885 written_size
= qemu_write_full(s
->fd
, buf
, size
);
886 if (written_size
!= size
) {
893 static int buf_write_note(const void *buf
, size_t size
, void *opaque
)
895 DumpState
*s
= opaque
;
897 /* note_buf is not enough */
898 if (s
->note_buf_offset
+ size
> s
->note_size
) {
902 memcpy(s
->note_buf
+ s
->note_buf_offset
, buf
, size
);
904 s
->note_buf_offset
+= size
;
910 * This function retrieves various sizes from an elf header.
912 * @note has to be a valid ELF note. The return sizes are unmodified
913 * (not padded or rounded up to be multiple of 4).
915 static void get_note_sizes(DumpState
*s
, const void *note
,
916 uint64_t *note_head_size
,
920 uint64_t note_head_sz
;
924 if (dump_is_64bit(s
)) {
925 const Elf64_Nhdr
*hdr
= note
;
926 note_head_sz
= sizeof(Elf64_Nhdr
);
927 name_sz
= cpu_to_dump64(s
, hdr
->n_namesz
);
928 desc_sz
= cpu_to_dump64(s
, hdr
->n_descsz
);
930 const Elf32_Nhdr
*hdr
= note
;
931 note_head_sz
= sizeof(Elf32_Nhdr
);
932 name_sz
= cpu_to_dump32(s
, hdr
->n_namesz
);
933 desc_sz
= cpu_to_dump32(s
, hdr
->n_descsz
);
936 if (note_head_size
) {
937 *note_head_size
= note_head_sz
;
940 *name_size
= name_sz
;
943 *desc_size
= desc_sz
;
947 static bool note_name_equal(DumpState
*s
,
948 const uint8_t *note
, const char *name
)
950 int len
= strlen(name
) + 1;
951 uint64_t head_size
, name_size
;
953 get_note_sizes(s
, note
, &head_size
, &name_size
, NULL
);
954 head_size
= ROUND_UP(head_size
, 4);
956 return name_size
== len
&& memcmp(note
+ head_size
, name
, len
) == 0;
959 /* write common header, sub header and elf note to vmcore */
960 static void create_header32(DumpState
*s
, Error
**errp
)
963 DiskDumpHeader32
*dh
= NULL
;
964 KdumpSubHeader32
*kh
= NULL
;
967 uint32_t sub_hdr_size
;
968 uint32_t bitmap_blocks
;
970 uint64_t offset_note
;
972 /* write common header, the version of kdump-compressed format is 6th */
973 size
= sizeof(DiskDumpHeader32
);
974 dh
= g_malloc0(size
);
976 memcpy(dh
->signature
, KDUMP_SIGNATURE
, SIG_LEN
);
977 dh
->header_version
= cpu_to_dump32(s
, 6);
978 block_size
= s
->dump_info
.page_size
;
979 dh
->block_size
= cpu_to_dump32(s
, block_size
);
980 sub_hdr_size
= sizeof(struct KdumpSubHeader32
) + s
->note_size
;
981 sub_hdr_size
= DIV_ROUND_UP(sub_hdr_size
, block_size
);
982 dh
->sub_hdr_size
= cpu_to_dump32(s
, sub_hdr_size
);
983 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
984 dh
->max_mapnr
= cpu_to_dump32(s
, MIN(s
->max_mapnr
, UINT_MAX
));
985 dh
->nr_cpus
= cpu_to_dump32(s
, s
->nr_cpus
);
986 bitmap_blocks
= DIV_ROUND_UP(s
->len_dump_bitmap
, block_size
) * 2;
987 dh
->bitmap_blocks
= cpu_to_dump32(s
, bitmap_blocks
);
988 strncpy(dh
->utsname
.machine
, ELF_MACHINE_UNAME
, sizeof(dh
->utsname
.machine
));
990 if (s
->flag_compress
& DUMP_DH_COMPRESSED_ZLIB
) {
991 status
|= DUMP_DH_COMPRESSED_ZLIB
;
994 if (s
->flag_compress
& DUMP_DH_COMPRESSED_LZO
) {
995 status
|= DUMP_DH_COMPRESSED_LZO
;
999 if (s
->flag_compress
& DUMP_DH_COMPRESSED_SNAPPY
) {
1000 status
|= DUMP_DH_COMPRESSED_SNAPPY
;
1003 dh
->status
= cpu_to_dump32(s
, status
);
1005 if (write_buffer(s
, 0, dh
, size
) < 0) {
1006 error_setg(errp
, "dump: failed to write disk dump header");
1010 /* write sub header */
1011 size
= sizeof(KdumpSubHeader32
);
1012 kh
= g_malloc0(size
);
1014 /* 64bit max_mapnr_64 */
1015 kh
->max_mapnr_64
= cpu_to_dump64(s
, s
->max_mapnr
);
1016 kh
->phys_base
= cpu_to_dump32(s
, s
->dump_info
.phys_base
);
1017 kh
->dump_level
= cpu_to_dump32(s
, DUMP_LEVEL
);
1019 offset_note
= DISKDUMP_HEADER_BLOCKS
* block_size
+ size
;
1020 if (s
->guest_note
&&
1021 note_name_equal(s
, s
->guest_note
, "VMCOREINFO")) {
1022 uint64_t hsize
, name_size
, size_vmcoreinfo_desc
, offset_vmcoreinfo
;
1024 get_note_sizes(s
, s
->guest_note
,
1025 &hsize
, &name_size
, &size_vmcoreinfo_desc
);
1026 offset_vmcoreinfo
= offset_note
+ s
->note_size
- s
->guest_note_size
+
1027 (DIV_ROUND_UP(hsize
, 4) + DIV_ROUND_UP(name_size
, 4)) * 4;
1028 kh
->offset_vmcoreinfo
= cpu_to_dump64(s
, offset_vmcoreinfo
);
1029 kh
->size_vmcoreinfo
= cpu_to_dump32(s
, size_vmcoreinfo_desc
);
1032 kh
->offset_note
= cpu_to_dump64(s
, offset_note
);
1033 kh
->note_size
= cpu_to_dump32(s
, s
->note_size
);
1035 if (write_buffer(s
, DISKDUMP_HEADER_BLOCKS
*
1036 block_size
, kh
, size
) < 0) {
1037 error_setg(errp
, "dump: failed to write kdump sub header");
1042 s
->note_buf
= g_malloc0(s
->note_size
);
1043 s
->note_buf_offset
= 0;
1045 /* use s->note_buf to store notes temporarily */
1046 write_elf32_notes(buf_write_note
, s
, errp
);
1050 if (write_buffer(s
, offset_note
, s
->note_buf
,
1051 s
->note_size
) < 0) {
1052 error_setg(errp
, "dump: failed to write notes");
1056 /* get offset of dump_bitmap */
1057 s
->offset_dump_bitmap
= (DISKDUMP_HEADER_BLOCKS
+ sub_hdr_size
) *
1060 /* get offset of page */
1061 s
->offset_page
= (DISKDUMP_HEADER_BLOCKS
+ sub_hdr_size
+ bitmap_blocks
) *
1067 g_free(s
->note_buf
);
1070 /* write common header, sub header and elf note to vmcore */
1071 static void create_header64(DumpState
*s
, Error
**errp
)
1074 DiskDumpHeader64
*dh
= NULL
;
1075 KdumpSubHeader64
*kh
= NULL
;
1077 uint32_t block_size
;
1078 uint32_t sub_hdr_size
;
1079 uint32_t bitmap_blocks
;
1080 uint32_t status
= 0;
1081 uint64_t offset_note
;
1083 /* write common header, the version of kdump-compressed format is 6th */
1084 size
= sizeof(DiskDumpHeader64
);
1085 dh
= g_malloc0(size
);
1087 memcpy(dh
->signature
, KDUMP_SIGNATURE
, SIG_LEN
);
1088 dh
->header_version
= cpu_to_dump32(s
, 6);
1089 block_size
= s
->dump_info
.page_size
;
1090 dh
->block_size
= cpu_to_dump32(s
, block_size
);
1091 sub_hdr_size
= sizeof(struct KdumpSubHeader64
) + s
->note_size
;
1092 sub_hdr_size
= DIV_ROUND_UP(sub_hdr_size
, block_size
);
1093 dh
->sub_hdr_size
= cpu_to_dump32(s
, sub_hdr_size
);
1094 /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
1095 dh
->max_mapnr
= cpu_to_dump32(s
, MIN(s
->max_mapnr
, UINT_MAX
));
1096 dh
->nr_cpus
= cpu_to_dump32(s
, s
->nr_cpus
);
1097 bitmap_blocks
= DIV_ROUND_UP(s
->len_dump_bitmap
, block_size
) * 2;
1098 dh
->bitmap_blocks
= cpu_to_dump32(s
, bitmap_blocks
);
1099 strncpy(dh
->utsname
.machine
, ELF_MACHINE_UNAME
, sizeof(dh
->utsname
.machine
));
1101 if (s
->flag_compress
& DUMP_DH_COMPRESSED_ZLIB
) {
1102 status
|= DUMP_DH_COMPRESSED_ZLIB
;
1105 if (s
->flag_compress
& DUMP_DH_COMPRESSED_LZO
) {
1106 status
|= DUMP_DH_COMPRESSED_LZO
;
1109 #ifdef CONFIG_SNAPPY
1110 if (s
->flag_compress
& DUMP_DH_COMPRESSED_SNAPPY
) {
1111 status
|= DUMP_DH_COMPRESSED_SNAPPY
;
1114 dh
->status
= cpu_to_dump32(s
, status
);
1116 if (write_buffer(s
, 0, dh
, size
) < 0) {
1117 error_setg(errp
, "dump: failed to write disk dump header");
1121 /* write sub header */
1122 size
= sizeof(KdumpSubHeader64
);
1123 kh
= g_malloc0(size
);
1125 /* 64bit max_mapnr_64 */
1126 kh
->max_mapnr_64
= cpu_to_dump64(s
, s
->max_mapnr
);
1127 kh
->phys_base
= cpu_to_dump64(s
, s
->dump_info
.phys_base
);
1128 kh
->dump_level
= cpu_to_dump32(s
, DUMP_LEVEL
);
1130 offset_note
= DISKDUMP_HEADER_BLOCKS
* block_size
+ size
;
1131 if (s
->guest_note
&&
1132 note_name_equal(s
, s
->guest_note
, "VMCOREINFO")) {
1133 uint64_t hsize
, name_size
, size_vmcoreinfo_desc
, offset_vmcoreinfo
;
1135 get_note_sizes(s
, s
->guest_note
,
1136 &hsize
, &name_size
, &size_vmcoreinfo_desc
);
1137 offset_vmcoreinfo
= offset_note
+ s
->note_size
- s
->guest_note_size
+
1138 (DIV_ROUND_UP(hsize
, 4) + DIV_ROUND_UP(name_size
, 4)) * 4;
1139 kh
->offset_vmcoreinfo
= cpu_to_dump64(s
, offset_vmcoreinfo
);
1140 kh
->size_vmcoreinfo
= cpu_to_dump64(s
, size_vmcoreinfo_desc
);
1143 kh
->offset_note
= cpu_to_dump64(s
, offset_note
);
1144 kh
->note_size
= cpu_to_dump64(s
, s
->note_size
);
1146 if (write_buffer(s
, DISKDUMP_HEADER_BLOCKS
*
1147 block_size
, kh
, size
) < 0) {
1148 error_setg(errp
, "dump: failed to write kdump sub header");
1153 s
->note_buf
= g_malloc0(s
->note_size
);
1154 s
->note_buf_offset
= 0;
1156 /* use s->note_buf to store notes temporarily */
1157 write_elf64_notes(buf_write_note
, s
, errp
);
1162 if (write_buffer(s
, offset_note
, s
->note_buf
,
1163 s
->note_size
) < 0) {
1164 error_setg(errp
, "dump: failed to write notes");
1168 /* get offset of dump_bitmap */
1169 s
->offset_dump_bitmap
= (DISKDUMP_HEADER_BLOCKS
+ sub_hdr_size
) *
1172 /* get offset of page */
1173 s
->offset_page
= (DISKDUMP_HEADER_BLOCKS
+ sub_hdr_size
+ bitmap_blocks
) *
1179 g_free(s
->note_buf
);
1182 static void write_dump_header(DumpState
*s
, Error
**errp
)
1184 if (dump_is_64bit(s
)) {
1185 create_header64(s
, errp
);
1187 create_header32(s
, errp
);
1191 static size_t dump_bitmap_get_bufsize(DumpState
*s
)
1193 return s
->dump_info
.page_size
;
1197 * set dump_bitmap sequencely. the bit before last_pfn is not allowed to be
1198 * rewritten, so if need to set the first bit, set last_pfn and pfn to 0.
1199 * set_dump_bitmap will always leave the recently set bit un-sync. And setting
1200 * (last bit + sizeof(buf) * 8) to 0 will do flushing the content in buf into
1201 * vmcore, ie. synchronizing un-sync bit into vmcore.
1203 static int set_dump_bitmap(uint64_t last_pfn
, uint64_t pfn
, bool value
,
1204 uint8_t *buf
, DumpState
*s
)
1206 off_t old_offset
, new_offset
;
1207 off_t offset_bitmap1
, offset_bitmap2
;
1209 size_t bitmap_bufsize
= dump_bitmap_get_bufsize(s
);
1210 size_t bits_per_buf
= bitmap_bufsize
* CHAR_BIT
;
1212 /* should not set the previous place */
1213 assert(last_pfn
<= pfn
);
1216 * if the bit needed to be set is not cached in buf, flush the data in buf
1217 * to vmcore firstly.
1218 * making new_offset be bigger than old_offset can also sync remained data
1221 old_offset
= bitmap_bufsize
* (last_pfn
/ bits_per_buf
);
1222 new_offset
= bitmap_bufsize
* (pfn
/ bits_per_buf
);
1224 while (old_offset
< new_offset
) {
1225 /* calculate the offset and write dump_bitmap */
1226 offset_bitmap1
= s
->offset_dump_bitmap
+ old_offset
;
1227 if (write_buffer(s
, offset_bitmap1
, buf
,
1228 bitmap_bufsize
) < 0) {
1232 /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
1233 offset_bitmap2
= s
->offset_dump_bitmap
+ s
->len_dump_bitmap
+
1235 if (write_buffer(s
, offset_bitmap2
, buf
,
1236 bitmap_bufsize
) < 0) {
1240 memset(buf
, 0, bitmap_bufsize
);
1241 old_offset
+= bitmap_bufsize
;
1244 /* get the exact place of the bit in the buf, and set it */
1245 byte
= (pfn
% bits_per_buf
) / CHAR_BIT
;
1246 bit
= (pfn
% bits_per_buf
) % CHAR_BIT
;
1248 buf
[byte
] |= 1u << bit
;
1250 buf
[byte
] &= ~(1u << bit
);
1256 static uint64_t dump_paddr_to_pfn(DumpState
*s
, uint64_t addr
)
1258 int target_page_shift
= ctz32(s
->dump_info
.page_size
);
1260 return (addr
>> target_page_shift
) - ARCH_PFN_OFFSET
;
1263 static uint64_t dump_pfn_to_paddr(DumpState
*s
, uint64_t pfn
)
1265 int target_page_shift
= ctz32(s
->dump_info
.page_size
);
1267 return (pfn
+ ARCH_PFN_OFFSET
) << target_page_shift
;
1271 * Return the page frame number and the page content in *bufptr. bufptr can be
1272 * NULL. If not NULL, *bufptr must contains a target page size of pre-allocated
1273 * memory. This is not necessarily the memory returned.
1275 static bool get_next_page(GuestPhysBlock
**blockptr
, uint64_t *pfnptr
,
1276 uint8_t **bufptr
, DumpState
*s
)
1278 GuestPhysBlock
*block
= *blockptr
;
1279 uint32_t page_size
= s
->dump_info
.page_size
;
1280 uint8_t *buf
= NULL
, *hbuf
;
1283 /* block == NULL means the start of the iteration */
1285 block
= QTAILQ_FIRST(&s
->guest_phys_blocks
.head
);
1287 addr
= block
->target_start
;
1288 *pfnptr
= dump_paddr_to_pfn(s
, addr
);
1291 addr
= dump_pfn_to_paddr(s
, *pfnptr
);
1293 assert(block
!= NULL
);
1296 if (addr
>= block
->target_start
&& addr
< block
->target_end
) {
1297 size_t n
= MIN(block
->target_end
- addr
, page_size
- addr
% page_size
);
1298 hbuf
= block
->host_addr
+ (addr
- block
->target_start
);
1300 if (n
== page_size
) {
1301 /* this is a whole target page, go for it */
1302 assert(addr
% page_size
== 0);
1305 } else if (bufptr
) {
1308 memset(buf
, 0, page_size
);
1314 memcpy(buf
+ addr
% page_size
, hbuf
, n
);
1316 if (addr
% page_size
== 0 || addr
>= block
->target_end
) {
1317 /* we filled up the page or the current block is finished */
1321 /* the next page is in the next block */
1322 *blockptr
= block
= QTAILQ_NEXT(block
, next
);
1327 addr
= block
->target_start
;
1328 /* are we still in the same page? */
1329 if (dump_paddr_to_pfn(s
, addr
) != *pfnptr
) {
1331 /* no, but we already filled something earlier, return it */
1334 /* else continue from there */
1335 *pfnptr
= dump_paddr_to_pfn(s
, addr
);
1348 static void write_dump_bitmap(DumpState
*s
, Error
**errp
)
1351 uint64_t last_pfn
, pfn
;
1352 void *dump_bitmap_buf
;
1353 size_t num_dumpable
;
1354 GuestPhysBlock
*block_iter
= NULL
;
1355 size_t bitmap_bufsize
= dump_bitmap_get_bufsize(s
);
1356 size_t bits_per_buf
= bitmap_bufsize
* CHAR_BIT
;
1358 /* dump_bitmap_buf is used to store dump_bitmap temporarily */
1359 dump_bitmap_buf
= g_malloc0(bitmap_bufsize
);
1365 * exam memory page by page, and set the bit in dump_bitmap corresponded
1366 * to the existing page.
1368 while (get_next_page(&block_iter
, &pfn
, NULL
, s
)) {
1369 ret
= set_dump_bitmap(last_pfn
, pfn
, true, dump_bitmap_buf
, s
);
1371 error_setg(errp
, "dump: failed to set dump_bitmap");
1380 * set_dump_bitmap will always leave the recently set bit un-sync. Here we
1381 * set the remaining bits from last_pfn to the end of the bitmap buffer to
1382 * 0. With those set, the un-sync bit will be synchronized into the vmcore.
1384 if (num_dumpable
> 0) {
1385 ret
= set_dump_bitmap(last_pfn
, last_pfn
+ bits_per_buf
, false,
1386 dump_bitmap_buf
, s
);
1388 error_setg(errp
, "dump: failed to sync dump_bitmap");
1393 /* number of dumpable pages that will be dumped later */
1394 s
->num_dumpable
= num_dumpable
;
1397 g_free(dump_bitmap_buf
);
1400 static void prepare_data_cache(DataCache
*data_cache
, DumpState
*s
,
1403 data_cache
->state
= s
;
1404 data_cache
->data_size
= 0;
1405 data_cache
->buf_size
= 4 * dump_bitmap_get_bufsize(s
);
1406 data_cache
->buf
= g_malloc0(data_cache
->buf_size
);
1407 data_cache
->offset
= offset
;
1410 static int write_cache(DataCache
*dc
, const void *buf
, size_t size
,
1414 * dc->buf_size should not be less than size, otherwise dc will never be
1417 assert(size
<= dc
->buf_size
);
1420 * if flag_sync is set, synchronize data in dc->buf into vmcore.
1421 * otherwise check if the space is enough for caching data in buf, if not,
1422 * write the data in dc->buf to dc->state->fd and reset dc->buf
1424 if ((!flag_sync
&& dc
->data_size
+ size
> dc
->buf_size
) ||
1425 (flag_sync
&& dc
->data_size
> 0)) {
1426 if (write_buffer(dc
->state
, dc
->offset
, dc
->buf
, dc
->data_size
) < 0) {
1430 dc
->offset
+= dc
->data_size
;
1435 memcpy(dc
->buf
+ dc
->data_size
, buf
, size
);
1436 dc
->data_size
+= size
;
1442 static void free_data_cache(DataCache
*data_cache
)
1444 g_free(data_cache
->buf
);
1447 static size_t get_len_buf_out(size_t page_size
, uint32_t flag_compress
)
1449 switch (flag_compress
) {
1450 case DUMP_DH_COMPRESSED_ZLIB
:
1451 return compressBound(page_size
);
1453 case DUMP_DH_COMPRESSED_LZO
:
1455 * LZO will expand incompressible data by a little amount. Please check
1456 * the following URL to see the expansion calculation:
1457 * http://www.oberhumer.com/opensource/lzo/lzofaq.php
1459 return page_size
+ page_size
/ 16 + 64 + 3;
1461 #ifdef CONFIG_SNAPPY
1462 case DUMP_DH_COMPRESSED_SNAPPY
:
1463 return snappy_max_compressed_length(page_size
);
1469 static void write_dump_pages(DumpState
*s
, Error
**errp
)
1472 DataCache page_desc
, page_data
;
1473 size_t len_buf_out
, size_out
;
1475 lzo_bytep wrkmem
= NULL
;
1477 uint8_t *buf_out
= NULL
;
1478 off_t offset_desc
, offset_data
;
1479 PageDescriptor pd
, pd_zero
;
1481 GuestPhysBlock
*block_iter
= NULL
;
1483 g_autofree
uint8_t *page
= NULL
;
1485 /* get offset of page_desc and page_data in dump file */
1486 offset_desc
= s
->offset_page
;
1487 offset_data
= offset_desc
+ sizeof(PageDescriptor
) * s
->num_dumpable
;
1489 prepare_data_cache(&page_desc
, s
, offset_desc
);
1490 prepare_data_cache(&page_data
, s
, offset_data
);
1492 /* prepare buffer to store compressed data */
1493 len_buf_out
= get_len_buf_out(s
->dump_info
.page_size
, s
->flag_compress
);
1494 assert(len_buf_out
!= 0);
1497 wrkmem
= g_malloc(LZO1X_1_MEM_COMPRESS
);
1500 buf_out
= g_malloc(len_buf_out
);
1503 * init zero page's page_desc and page_data, because every zero page
1504 * uses the same page_data
1506 pd_zero
.size
= cpu_to_dump32(s
, s
->dump_info
.page_size
);
1507 pd_zero
.flags
= cpu_to_dump32(s
, 0);
1508 pd_zero
.offset
= cpu_to_dump64(s
, offset_data
);
1509 pd_zero
.page_flags
= cpu_to_dump64(s
, 0);
1510 buf
= g_malloc0(s
->dump_info
.page_size
);
1511 ret
= write_cache(&page_data
, buf
, s
->dump_info
.page_size
, false);
1514 error_setg(errp
, "dump: failed to write page data (zero page)");
1518 offset_data
+= s
->dump_info
.page_size
;
1519 page
= g_malloc(s
->dump_info
.page_size
);
1522 * dump memory to vmcore page by page. zero page will all be resided in the
1523 * first page of page section
1525 for (buf
= page
; get_next_page(&block_iter
, &pfn_iter
, &buf
, s
); buf
= page
) {
1526 /* check zero page */
1527 if (buffer_is_zero(buf
, s
->dump_info
.page_size
)) {
1528 ret
= write_cache(&page_desc
, &pd_zero
, sizeof(PageDescriptor
),
1531 error_setg(errp
, "dump: failed to write page desc");
1536 * not zero page, then:
1537 * 1. compress the page
1538 * 2. write the compressed page into the cache of page_data
1539 * 3. get page desc of the compressed page and write it into the
1540 * cache of page_desc
1542 * only one compression format will be used here, for
1543 * s->flag_compress is set. But when compression fails to work,
1544 * we fall back to save in plaintext.
1546 size_out
= len_buf_out
;
1547 if ((s
->flag_compress
& DUMP_DH_COMPRESSED_ZLIB
) &&
1548 (compress2(buf_out
, (uLongf
*)&size_out
, buf
,
1549 s
->dump_info
.page_size
, Z_BEST_SPEED
) == Z_OK
) &&
1550 (size_out
< s
->dump_info
.page_size
)) {
1551 pd
.flags
= cpu_to_dump32(s
, DUMP_DH_COMPRESSED_ZLIB
);
1552 pd
.size
= cpu_to_dump32(s
, size_out
);
1554 ret
= write_cache(&page_data
, buf_out
, size_out
, false);
1556 error_setg(errp
, "dump: failed to write page data");
1560 } else if ((s
->flag_compress
& DUMP_DH_COMPRESSED_LZO
) &&
1561 (lzo1x_1_compress(buf
, s
->dump_info
.page_size
, buf_out
,
1562 (lzo_uint
*)&size_out
, wrkmem
) == LZO_E_OK
) &&
1563 (size_out
< s
->dump_info
.page_size
)) {
1564 pd
.flags
= cpu_to_dump32(s
, DUMP_DH_COMPRESSED_LZO
);
1565 pd
.size
= cpu_to_dump32(s
, size_out
);
1567 ret
= write_cache(&page_data
, buf_out
, size_out
, false);
1569 error_setg(errp
, "dump: failed to write page data");
1573 #ifdef CONFIG_SNAPPY
1574 } else if ((s
->flag_compress
& DUMP_DH_COMPRESSED_SNAPPY
) &&
1575 (snappy_compress((char *)buf
, s
->dump_info
.page_size
,
1576 (char *)buf_out
, &size_out
) == SNAPPY_OK
) &&
1577 (size_out
< s
->dump_info
.page_size
)) {
1578 pd
.flags
= cpu_to_dump32(s
, DUMP_DH_COMPRESSED_SNAPPY
);
1579 pd
.size
= cpu_to_dump32(s
, size_out
);
1581 ret
= write_cache(&page_data
, buf_out
, size_out
, false);
1583 error_setg(errp
, "dump: failed to write page data");
1589 * fall back to save in plaintext, size_out should be
1590 * assigned the target's page size
1592 pd
.flags
= cpu_to_dump32(s
, 0);
1593 size_out
= s
->dump_info
.page_size
;
1594 pd
.size
= cpu_to_dump32(s
, size_out
);
1596 ret
= write_cache(&page_data
, buf
,
1597 s
->dump_info
.page_size
, false);
1599 error_setg(errp
, "dump: failed to write page data");
1604 /* get and write page desc here */
1605 pd
.page_flags
= cpu_to_dump64(s
, 0);
1606 pd
.offset
= cpu_to_dump64(s
, offset_data
);
1607 offset_data
+= size_out
;
1609 ret
= write_cache(&page_desc
, &pd
, sizeof(PageDescriptor
), false);
1611 error_setg(errp
, "dump: failed to write page desc");
1615 s
->written_size
+= s
->dump_info
.page_size
;
1618 ret
= write_cache(&page_desc
, NULL
, 0, true);
1620 error_setg(errp
, "dump: failed to sync cache for page_desc");
1623 ret
= write_cache(&page_data
, NULL
, 0, true);
1625 error_setg(errp
, "dump: failed to sync cache for page_data");
1630 free_data_cache(&page_desc
);
1631 free_data_cache(&page_data
);
1640 static void create_kdump_vmcore(DumpState
*s
, Error
**errp
)
1646 * the kdump-compressed format is:
1648 * +------------------------------------------+ 0x0
1649 * | main header (struct disk_dump_header) |
1650 * |------------------------------------------+ block 1
1651 * | sub header (struct kdump_sub_header) |
1652 * |------------------------------------------+ block 2
1653 * | 1st-dump_bitmap |
1654 * |------------------------------------------+ block 2 + X blocks
1655 * | 2nd-dump_bitmap | (aligned by block)
1656 * |------------------------------------------+ block 2 + 2 * X blocks
1657 * | page desc for pfn 0 (struct page_desc) | (aligned by block)
1658 * | page desc for pfn 1 (struct page_desc) |
1660 * |------------------------------------------| (not aligned by block)
1661 * | page data (pfn 0) |
1662 * | page data (pfn 1) |
1664 * +------------------------------------------+
1667 ret
= write_start_flat_header(s
);
1669 error_setg(errp
, "dump: failed to write start flat header");
1673 write_dump_header(s
, errp
);
1678 write_dump_bitmap(s
, errp
);
1683 write_dump_pages(s
, errp
);
1688 ret
= write_end_flat_header(s
);
1690 error_setg(errp
, "dump: failed to write end flat header");
1695 static void get_max_mapnr(DumpState
*s
)
1697 GuestPhysBlock
*last_block
;
1699 last_block
= QTAILQ_LAST(&s
->guest_phys_blocks
.head
);
1700 s
->max_mapnr
= dump_paddr_to_pfn(s
, last_block
->target_end
);
1703 static DumpState dump_state_global
= { .status
= DUMP_STATUS_NONE
};
1705 static void dump_state_prepare(DumpState
*s
)
1707 /* zero the struct, setting status to active */
1708 *s
= (DumpState
) { .status
= DUMP_STATUS_ACTIVE
};
1711 bool qemu_system_dump_in_progress(void)
1713 DumpState
*state
= &dump_state_global
;
1714 return (qatomic_read(&state
->status
) == DUMP_STATUS_ACTIVE
);
1718 * calculate total size of memory to be dumped (taking filter into
1721 static int64_t dump_calculate_size(DumpState
*s
)
1723 GuestPhysBlock
*block
;
1726 QTAILQ_FOREACH(block
, &s
->guest_phys_blocks
.head
, next
) {
1727 total
+= dump_filtered_memblock_size(block
,
1728 s
->filter_area_begin
,
1729 s
->filter_area_length
);
1735 static void vmcoreinfo_update_phys_base(DumpState
*s
)
1737 uint64_t size
, note_head_size
, name_size
, phys_base
;
1742 if (!note_name_equal(s
, s
->guest_note
, "VMCOREINFO")) {
1746 get_note_sizes(s
, s
->guest_note
, ¬e_head_size
, &name_size
, &size
);
1747 note_head_size
= ROUND_UP(note_head_size
, 4);
1749 vmci
= s
->guest_note
+ note_head_size
+ ROUND_UP(name_size
, 4);
1750 *(vmci
+ size
) = '\0';
1752 lines
= g_strsplit((char *)vmci
, "\n", -1);
1753 for (i
= 0; lines
[i
]; i
++) {
1754 const char *prefix
= NULL
;
1756 if (s
->dump_info
.d_machine
== EM_X86_64
) {
1757 prefix
= "NUMBER(phys_base)=";
1758 } else if (s
->dump_info
.d_machine
== EM_AARCH64
) {
1759 prefix
= "NUMBER(PHYS_OFFSET)=";
1762 if (prefix
&& g_str_has_prefix(lines
[i
], prefix
)) {
1763 if (qemu_strtou64(lines
[i
] + strlen(prefix
), NULL
, 16,
1765 warn_report("Failed to read %s", prefix
);
1767 s
->dump_info
.phys_base
= phys_base
;
1776 static void dump_init(DumpState
*s
, int fd
, bool has_format
,
1777 DumpGuestMemoryFormat format
, bool paging
, bool has_filter
,
1778 int64_t begin
, int64_t length
, bool kdump_raw
,
1782 VMCoreInfoState
*vmci
= vmcoreinfo_find();
1787 s
->has_format
= has_format
;
1789 s
->written_size
= 0;
1790 s
->kdump_raw
= kdump_raw
;
1792 /* kdump-compressed is conflict with paging and filter */
1793 if (has_format
&& format
!= DUMP_GUEST_MEMORY_FORMAT_ELF
) {
1794 assert(!paging
&& !has_filter
);
1797 if (runstate_is_running()) {
1798 vm_stop(RUN_STATE_SAVE_VM
);
1804 /* If we use KVM, we should synchronize the registers before we get dump
1805 * info or physmap info.
1807 cpu_synchronize_all_states();
1814 if (has_filter
&& !length
) {
1815 error_setg(errp
, "parameter 'length' expects a non-zero size");
1818 s
->filter_area_begin
= begin
;
1819 s
->filter_area_length
= length
;
1821 /* First index is 0, it's the special null name */
1822 s
->string_table_buf
= g_array_new(FALSE
, TRUE
, 1);
1824 * Allocate the null name, due to the clearing option set to true
1827 g_array_set_size(s
->string_table_buf
, 1);
1829 memory_mapping_list_init(&s
->list
);
1831 guest_phys_blocks_init(&s
->guest_phys_blocks
);
1832 guest_phys_blocks_append(&s
->guest_phys_blocks
);
1833 s
->total_size
= dump_calculate_size(s
);
1834 #ifdef DEBUG_DUMP_GUEST_MEMORY
1835 fprintf(stderr
, "DUMP: total memory to dump: %lu\n", s
->total_size
);
1838 /* it does not make sense to dump non-existent memory */
1839 if (!s
->total_size
) {
1840 error_setg(errp
, "dump: no guest memory to dump");
1844 /* get dump info: endian, class and architecture.
1845 * If the target architecture is not supported, cpu_get_dump_info() will
1848 ret
= cpu_get_dump_info(&s
->dump_info
, &s
->guest_phys_blocks
);
1851 "dumping guest memory is not supported on this target");
1855 if (!s
->dump_info
.page_size
) {
1856 s
->dump_info
.page_size
= qemu_target_page_size();
1859 s
->note_size
= cpu_get_note_size(s
->dump_info
.d_class
,
1860 s
->dump_info
.d_machine
, nr_cpus
);
1861 assert(s
->note_size
>= 0);
1864 * The goal of this block is to (a) update the previously guessed
1865 * phys_base, (b) copy the guest note out of the guest.
1866 * Failure to do so is not fatal for dumping.
1869 uint64_t addr
, note_head_size
, name_size
, desc_size
;
1871 uint16_t guest_format
;
1873 note_head_size
= dump_is_64bit(s
) ?
1874 sizeof(Elf64_Nhdr
) : sizeof(Elf32_Nhdr
);
1876 guest_format
= le16_to_cpu(vmci
->vmcoreinfo
.guest_format
);
1877 size
= le32_to_cpu(vmci
->vmcoreinfo
.size
);
1878 addr
= le64_to_cpu(vmci
->vmcoreinfo
.paddr
);
1879 if (!vmci
->has_vmcoreinfo
) {
1880 warn_report("guest note is not present");
1881 } else if (size
< note_head_size
|| size
> MAX_GUEST_NOTE_SIZE
) {
1882 warn_report("guest note size is invalid: %" PRIu32
, size
);
1883 } else if (guest_format
!= FW_CFG_VMCOREINFO_FORMAT_ELF
) {
1884 warn_report("guest note format is unsupported: %" PRIu16
, guest_format
);
1886 s
->guest_note
= g_malloc(size
+ 1); /* +1 for adding \0 */
1887 cpu_physical_memory_read(addr
, s
->guest_note
, size
);
1889 get_note_sizes(s
, s
->guest_note
, NULL
, &name_size
, &desc_size
);
1890 s
->guest_note_size
= ELF_NOTE_SIZE(note_head_size
, name_size
,
1892 if (name_size
> MAX_GUEST_NOTE_SIZE
||
1893 desc_size
> MAX_GUEST_NOTE_SIZE
||
1894 s
->guest_note_size
> size
) {
1895 warn_report("Invalid guest note header");
1896 g_free(s
->guest_note
);
1897 s
->guest_note
= NULL
;
1899 vmcoreinfo_update_phys_base(s
);
1900 s
->note_size
+= s
->guest_note_size
;
1905 /* get memory mapping */
1907 qemu_get_guest_memory_mapping(&s
->list
, &s
->guest_phys_blocks
, errp
);
1912 qemu_get_guest_simple_memory_mapping(&s
->list
, &s
->guest_phys_blocks
);
1915 s
->nr_cpus
= nr_cpus
;
1920 tmp
= DIV_ROUND_UP(DIV_ROUND_UP(s
->max_mapnr
, CHAR_BIT
),
1921 s
->dump_info
.page_size
);
1922 s
->len_dump_bitmap
= tmp
* s
->dump_info
.page_size
;
1924 /* init for kdump-compressed format */
1925 if (has_format
&& format
!= DUMP_GUEST_MEMORY_FORMAT_ELF
) {
1927 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB
:
1928 s
->flag_compress
= DUMP_DH_COMPRESSED_ZLIB
;
1931 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO
:
1933 if (lzo_init() != LZO_E_OK
) {
1934 error_setg(errp
, "failed to initialize the LZO library");
1938 s
->flag_compress
= DUMP_DH_COMPRESSED_LZO
;
1941 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY
:
1942 s
->flag_compress
= DUMP_DH_COMPRESSED_SNAPPY
;
1946 s
->flag_compress
= 0;
1952 if (dump_has_filter(s
)) {
1953 memory_mapping_filter(&s
->list
, s
->filter_area_begin
, s
->filter_area_length
);
1957 * The first section header is always a special one in which most
1958 * fields are 0. The section header string table is also always
1964 * Adds the number of architecture sections to shdr_num and sets
1965 * elf_section_data_size so we know the offsets and sizes of all
1968 if (s
->dump_info
.arch_sections_add_fn
) {
1969 s
->dump_info
.arch_sections_add_fn(s
);
1973 * calculate shdr_num so we know the offsets and sizes of all
1975 * Calculate phdr_num
1977 * The absolute maximum amount of phdrs is UINT32_MAX - 1 as
1978 * sh_info is 32 bit. There's special handling once we go over
1979 * UINT16_MAX - 1 but that is handled in the ehdr and section
1982 s
->phdr_num
= 1; /* Reserve PT_NOTE */
1983 if (s
->list
.num
<= UINT32_MAX
- 1) {
1984 s
->phdr_num
+= s
->list
.num
;
1986 s
->phdr_num
= UINT32_MAX
;
1990 * Now that the number of section and program headers is known we
1991 * can calculate the offsets of the headers and data.
1993 if (dump_is_64bit(s
)) {
1994 s
->shdr_offset
= sizeof(Elf64_Ehdr
);
1995 s
->phdr_offset
= s
->shdr_offset
+ sizeof(Elf64_Shdr
) * s
->shdr_num
;
1996 s
->note_offset
= s
->phdr_offset
+ sizeof(Elf64_Phdr
) * s
->phdr_num
;
1998 s
->shdr_offset
= sizeof(Elf32_Ehdr
);
1999 s
->phdr_offset
= s
->shdr_offset
+ sizeof(Elf32_Shdr
) * s
->shdr_num
;
2000 s
->note_offset
= s
->phdr_offset
+ sizeof(Elf32_Phdr
) * s
->phdr_num
;
2002 s
->memory_offset
= s
->note_offset
+ s
->note_size
;
2003 s
->section_offset
= s
->memory_offset
+ s
->total_size
;
2011 /* this operation might be time consuming. */
2012 static void dump_process(DumpState
*s
, Error
**errp
)
2015 DumpQueryResult
*result
= NULL
;
2017 if (s
->has_format
&& s
->format
== DUMP_GUEST_MEMORY_FORMAT_WIN_DMP
) {
2018 create_win_dump(s
, errp
);
2019 } else if (s
->has_format
&& s
->format
!= DUMP_GUEST_MEMORY_FORMAT_ELF
) {
2020 create_kdump_vmcore(s
, errp
);
2022 create_vmcore(s
, errp
);
2025 /* make sure status is written after written_size updates */
2027 qatomic_set(&s
->status
,
2028 (*errp
? DUMP_STATUS_FAILED
: DUMP_STATUS_COMPLETED
));
2030 /* send DUMP_COMPLETED message (unconditionally) */
2031 result
= qmp_query_dump(NULL
);
2032 /* should never fail */
2034 qapi_event_send_dump_completed(result
,
2035 *errp
? error_get_pretty(*errp
) : NULL
);
2036 qapi_free_DumpQueryResult(result
);
2041 static void *dump_thread(void *data
)
2043 DumpState
*s
= (DumpState
*)data
;
2044 dump_process(s
, NULL
);
2048 DumpQueryResult
*qmp_query_dump(Error
**errp
)
2050 DumpQueryResult
*result
= g_new(DumpQueryResult
, 1);
2051 DumpState
*state
= &dump_state_global
;
2052 result
->status
= qatomic_read(&state
->status
);
2053 /* make sure we are reading status and written_size in order */
2055 result
->completed
= state
->written_size
;
2056 result
->total
= state
->total_size
;
2060 void qmp_dump_guest_memory(bool paging
, const char *protocol
,
2061 bool has_detach
, bool detach
,
2062 bool has_begin
, int64_t begin
,
2063 bool has_length
, int64_t length
,
2064 bool has_format
, DumpGuestMemoryFormat format
,
2071 bool detach_p
= false;
2072 bool kdump_raw
= false;
2074 if (runstate_check(RUN_STATE_INMIGRATE
)) {
2075 error_setg(errp
, "Dump not allowed during incoming migration.");
2079 /* if there is a dump in background, we should wait until the dump
2081 if (qemu_system_dump_in_progress()) {
2082 error_setg(errp
, "There is a dump in process, please wait.");
2087 * externally, we represent kdump-raw-* as separate formats, but internally
2088 * they are handled the same, except for the "raw" flag
2092 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_RAW_ZLIB
:
2093 format
= DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB
;
2096 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_RAW_LZO
:
2097 format
= DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO
;
2100 case DUMP_GUEST_MEMORY_FORMAT_KDUMP_RAW_SNAPPY
:
2101 format
= DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY
;
2110 * kdump-compressed format need the whole memory dumped, so paging or
2111 * filter is not supported here.
2113 if ((has_format
&& format
!= DUMP_GUEST_MEMORY_FORMAT_ELF
) &&
2114 (paging
|| has_begin
|| has_length
)) {
2115 error_setg(errp
, "kdump-compressed format doesn't support paging or "
2119 if (has_begin
&& !has_length
) {
2120 error_setg(errp
, QERR_MISSING_PARAMETER
, "length");
2123 if (!has_begin
&& has_length
) {
2124 error_setg(errp
, QERR_MISSING_PARAMETER
, "begin");
2131 /* check whether lzo/snappy is supported */
2133 if (has_format
&& format
== DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO
) {
2134 error_setg(errp
, "kdump-lzo is not available now");
2139 #ifndef CONFIG_SNAPPY
2140 if (has_format
&& format
== DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY
) {
2141 error_setg(errp
, "kdump-snappy is not available now");
2146 if (has_format
&& format
== DUMP_GUEST_MEMORY_FORMAT_WIN_DMP
2147 && !win_dump_available(errp
)) {
2151 if (strstart(protocol
, "fd:", &p
)) {
2152 fd
= monitor_get_fd(monitor_cur(), p
, errp
);
2156 } else if (strstart(protocol
, "file:", &p
)) {
2157 fd
= qemu_create(p
, O_WRONLY
| O_TRUNC
| O_BINARY
, S_IRUSR
, errp
);
2163 "parameter 'protocol' must start with 'file:' or 'fd:'");
2166 if (kdump_raw
&& lseek(fd
, 0, SEEK_CUR
) == (off_t
) -1) {
2168 error_setg(errp
, "kdump-raw formats require a seekable file");
2172 if (!dump_migration_blocker
) {
2173 error_setg(&dump_migration_blocker
,
2174 "Live migration disabled: dump-guest-memory in progress");
2178 * Allows even for -only-migratable, but forbid migration during the
2179 * process of dump guest memory.
2181 if (migrate_add_blocker_internal(&dump_migration_blocker
, errp
)) {
2182 /* Remember to release the fd before passing it over to dump state */
2187 s
= &dump_state_global
;
2188 dump_state_prepare(s
);
2190 dump_init(s
, fd
, has_format
, format
, paging
, has_begin
,
2191 begin
, length
, kdump_raw
, errp
);
2193 qatomic_set(&s
->status
, DUMP_STATUS_FAILED
);
2200 qemu_thread_create(&s
->dump_thread
, "dump_thread", dump_thread
,
2201 s
, QEMU_THREAD_DETACHED
);
2204 dump_process(s
, errp
);
2208 DumpGuestMemoryCapability
*qmp_query_dump_guest_memory_capability(Error
**errp
)
2210 DumpGuestMemoryCapability
*cap
=
2211 g_new0(DumpGuestMemoryCapability
, 1);
2212 DumpGuestMemoryFormatList
**tail
= &cap
->formats
;
2214 /* elf is always available */
2215 QAPI_LIST_APPEND(tail
, DUMP_GUEST_MEMORY_FORMAT_ELF
);
2217 /* kdump-zlib is always available */
2218 QAPI_LIST_APPEND(tail
, DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB
);
2219 QAPI_LIST_APPEND(tail
, DUMP_GUEST_MEMORY_FORMAT_KDUMP_RAW_ZLIB
);
2221 /* add new item if kdump-lzo is available */
2223 QAPI_LIST_APPEND(tail
, DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO
);
2224 QAPI_LIST_APPEND(tail
, DUMP_GUEST_MEMORY_FORMAT_KDUMP_RAW_LZO
);
2227 /* add new item if kdump-snappy is available */
2228 #ifdef CONFIG_SNAPPY
2229 QAPI_LIST_APPEND(tail
, DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY
);
2230 QAPI_LIST_APPEND(tail
, DUMP_GUEST_MEMORY_FORMAT_KDUMP_RAW_SNAPPY
);
2233 if (win_dump_available(NULL
)) {
2234 QAPI_LIST_APPEND(tail
, DUMP_GUEST_MEMORY_FORMAT_WIN_DMP
);