/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "elf.h"
#include "cpu.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/cpus.h"
#include "qapi/qmp/qerror.h"
#include "qmp-commands.h"
#include "qapi-event.h"

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif

#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

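/*
 * The cpu_to_dump* helpers above convert header fields from host byte order
 * to the *target's* byte order recorded in dump_info, so a vmcore produced
 * on a little-endian host for a big-endian guest is still readable by
 * guest-side tools. For example, cpu_to_dump16(s, 0x1234) stores the bytes
 * 0x12 0x34 when d_endian is ELFDATA2MSB, and 0x34 0x12 when it is
 * ELFDATA2LSB.
 */
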
static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    if (s->resume) {
        if (s->detached) {
            qemu_mutex_lock_iothread();
        }
        vm_start();
        if (s->detached) {
            qemu_mutex_unlock_iothread();
        }
    }

    return 0;
}

static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -errno;
    }

    return 0;
}

static void write_elf64_header(DumpState *s, Error **errp)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write elf header");
    }
}

static void write_elf32_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write elf header");
    }
}

static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

static void write_elf64_note(DumpState *s, Error **errp)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }
}

static void write_elf32_note(DumpState *s, Error **errp)
{
    Elf32_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }
}

static void write_elf_section(DumpState *s, int type, Error **errp)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write section header table");
    }
}

static void write_data(DumpState *s, void *buf, int length, Error **errp)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to save memory");
    } else {
        s->written_size += length;
    }
}

/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    int64_t i;
    Error *local_err = NULL;

    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

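/*
 * Splitting the block into page-sized writes (rather than one large write)
 * keeps each fd_write_vmcore() call bounded and lets written_size advance in
 * page-sized steps, which query-dump reports as dump progress.
 */
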
/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}

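/*
 * Worked example of the clamping above: with a filter begin=0x1000,
 * length=0x3000 and a block covering [0x0, 0x8000), start becomes 0x1000 and
 * size_in_block 0x3000. A mapping at phys_addr=0x3000 with
 * mapping_length=0x4000 spills past start + size_in_block = 0x4000, so
 * *p_filesz is clamped to size_in_block - (phys_addr - start)
 * = 0x3000 - 0x2000 = 0x1000.
 */
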
static void write_elf_loads(DumpState *s, Error **errp)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    Error *local_err = NULL;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        }

        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        write_elf64_header(s, &local_err);
    } else {
        write_elf32_header(s, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        write_elf64_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 1, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf64_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } else {
        /* write PT_NOTE to vmcore */
        write_elf32_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 0, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf32_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    GuestPhysBlock *block;
    int64_t size;
    Error *local_err = NULL;

    do {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        write_memory(s, block, s->start, size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

    } while (!get_next_block(s, block));
}

static void create_vmcore(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    dump_begin(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    dump_iterate(s, errp);
}

static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    size_t written_size;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;
    size_t written_size;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

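/*
 * The (offset, buf_size) pair written before each chunk is what makes this
 * the makedumpfile "flat" format: the output can go to a non-seekable fd
 * (e.g. a pipe), because each chunk carries the file offset it belongs at.
 * makedumpfile can later rearrange such a stream into a normal
 * kdump-compressed file (its -R option reads this flat format).
 */
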
static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* note_buf is not enough */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}

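/*
 * buf_write_note() has the same signature as fd_write_vmcore(), so the
 * write_elf*_notes() helpers can compose the notes either straight to the
 * file (ELF dump) or into s->note_buf first (kdump-compressed dump, where
 * the notes must land at a known offset recorded in the sub header).
 */
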
/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write common header; the kdump-compressed format version is 6 */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                            block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write common header; the kdump-compressed format version is 6 */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                            block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

static void write_dump_header(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    if (s->dump_info.d_class == ELFCLASS32) {
        create_header32(s, &local_err);
    } else {
        create_header64(s, &local_err);
    }
    error_propagate(errp, local_err);
}

static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}

/*
 * Set dump_bitmap sequentially; the bit before last_pfn is not allowed to be
 * rewritten, so to set the first bit, set both last_pfn and pfn to 0.
 * set_dump_bitmap always leaves the most recently set bit un-synced. Setting
 * (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into the
 * vmcore, i.e. it synchronizes the un-synced bits into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit needed to be set is not cached in buf, flush the data in buf
     * to vmcore first.
     * making new_offset bigger than old_offset also syncs remaining data
     * into vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}

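/*
 * Worked example: with a 4 KiB bitmap buffer, bits_per_buf is 4096 * 8 =
 * 32768, i.e. one buffer covers 32768 pfns. Setting pfn 40000 after
 * last_pfn 100 gives old_offset = 0 and new_offset = 4096, so the loop above
 * flushes the first (already complete) buffer once, zeroes buf, and then
 * sets bit 40000 % 32768 = 7232 in the fresh buffer.
 */
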
static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}

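/*
 * ctz32() of a power-of-two page size yields the page shift, e.g.
 * ctz32(4096) == 12, so with ARCH_PFN_OFFSET == 0 the two helpers above
 * reduce to addr >> 12 and pfn << 12.
 */
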
/*
 * Examine every page and return the page frame number and the address of the
 * page. bufptr can be NULL. Note: the blocks here are supposed to reflect
 * guest-phys blocks, so block->target_start and block->target_end should be
 * integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = dump_pfn_to_paddr(s, *pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}

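/*
 * Typical iteration, as used by write_dump_bitmap() and write_dump_pages():
 *
 *     GuestPhysBlock *block = NULL;
 *     uint64_t pfn;
 *     uint8_t *buf;
 *
 *     while (get_next_page(&block, &pfn, &buf, s)) {
 *         ... one page at host address buf, guest frame number pfn ...
 *     }
 */
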
static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * exam memory page by page, and set the bit in dump_bitmap corresponded
     * to the existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to set dump_bitmap");
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-sync. Here we
     * set the remaining bits from last_pfn to the end of the bitmap buffer to
     * 0. With those set, the un-sync bit will be synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}

static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}

static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size should not be less than size, otherwise dc will never be
     * enough
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize data in dc->buf into vmcore.
     * otherwise check if the space is enough for caching data in buf, if not,
     * write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

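/*
 * Callers flush a cache by passing flag_sync = true with no payload, e.g.
 * write_cache(&page_desc, NULL, 0, true); the memcpy above is skipped in
 * that case, so the NULL buf is never dereferenced.
 */
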
static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}

static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}

/*
 * check if the page is all 0
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}

static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write page data (zero page)");
        goto out;
    }

    offset_data += s->dump_info.page_size;

    /*
     * dump memory to vmcore page by page. zero pages will all reside in the
     * first page of the page section
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        } else {
            /*
             * not zero page, then:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get page desc of the compressed page and write it into the
             *    cache of page_desc
             *
             * only one compression format will be used here, for
             * s->flag_compress is set. But when compression fails to work,
             * we fall back to save in plaintext.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf,
                           s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                       (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                        (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                       (snappy_compress((char *)buf, s->dump_info.page_size,
                        (char *)buf_out, &size_out) == SNAPPY_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to save in plaintext, size_out should be
                 * assigned the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        }
        s->written_size += s->dump_info.page_size;
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_desc");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_data");
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);

#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif

    g_free(buf_out);
}

static void create_kdump_vmcore(DumpState *s, Error **errp)
{
    int ret;
    Error *local_err = NULL;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                    :                     |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                    :                     |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write start flat header");
        return;
    }

    write_dump_header(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_bitmap(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_pages(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write end flat header");
        return;
    }
}

static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
    s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
}

static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };

static void dump_state_prepare(DumpState *s)
{
    /* zero the struct, setting status to active */
    *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
}

bool dump_in_progress(void)
{
    DumpState *state = &dump_state_global;
    return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE);
}

/* calculate total size of memory to be dumped (taking filter into
 * account.) */
static int64_t dump_calculate_size(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size = 0, total = 0, left = 0, right = 0;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            /* calculate the overlapped region. */
            left = MAX(s->begin, block->target_start);
            right = MIN(s->begin + s->length, block->target_end);
            size = right - left;
            size = size > 0 ? size : 0;
        } else {
            /* count the whole region in */
            size = (block->target_end - block->target_start);
        }
        total += size;
    }

    return total;
}

static void dump_init(DumpState *s, int fd, bool has_format,
                      DumpGuestMemoryFormat format, bool paging, bool has_filter,
                      int64_t begin, int64_t length, Error **errp)
{
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    s->has_format = has_format;
    s->format = format;
    s->written_size = 0;

    /* kdump-compressed is incompatible with paging and filter */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    memory_mapping_list_init(&s->list);

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);
    s->total_size = dump_calculate_size(s);
#ifdef DEBUG_DUMP_GUEST_MEMORY
    fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
#endif

    /* it does not make sense to dump non-existent memory */
    if (!s->total_size) {
        error_setg(errp, "dump: no guest memory to dump");
        goto cleanup;
    }

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    if (!s->dump_info.page_size) {
        s->dump_info.page_size = TARGET_PAGE_SIZE;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* get memory mapping */
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);

    uint64_t tmp;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
                       s->dump_info.page_size);
    s->len_dump_bitmap = tmp * s->dump_info.page_size;

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return;

cleanup:
    dump_cleanup(s);
}

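/*
 * On the phdr_num overflow handling above: e_phnum is only 16 bits wide, so
 * when more than UINT16_MAX - 2 PT_LOAD entries are needed, e_phnum is set
 * to the escape value PN_XNUM (0xffff) and the true count is carried in
 * sh_info of the single section header (see write_elf_section()).
 */
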
/* this operation might be time consuming. */
static void dump_process(DumpState *s, Error **errp)
{
    Error *local_err = NULL;
    DumpQueryResult *result = NULL;

    if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        create_kdump_vmcore(s, &local_err);
    } else {
        create_vmcore(s, &local_err);
    }

    /* make sure status is written after written_size updates */
    smp_wmb();
    atomic_set(&s->status,
               (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));

    /* send DUMP_COMPLETED message (unconditionally) */
    result = qmp_query_dump(NULL);
    /* should never fail */
    assert(result);
    qapi_event_send_dump_completed(result, !!local_err, (local_err ?
                                   error_get_pretty(local_err) : NULL),
                                   &error_abort);
    qapi_free_DumpQueryResult(result);

    error_propagate(errp, local_err);
    dump_cleanup(s);
}

static void *dump_thread(void *data)
{
    DumpState *s = (DumpState *)data;
    dump_process(s, NULL);
    return NULL;
}

DumpQueryResult *qmp_query_dump(Error **errp)
{
    DumpQueryResult *result = g_new(DumpQueryResult, 1);
    DumpState *state = &dump_state_global;
    result->status = atomic_read(&state->status);
    /* make sure we are reading status and written_size in order */
    smp_rmb();
    result->completed = state->written_size;
    result->total = state->total_size;
    return result;
}

void qmp_dump_guest_memory(bool paging, const char *file,
                           bool has_detach, bool detach,
                           bool has_begin, int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    Error *local_err = NULL;
    bool detach_p = false;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Dump not allowed during incoming migration.");
        return;
    }

    /* if there is a dump in background, we should wait until the dump
     * finished */
    if (dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    /*
     * kdump-compressed format needs the whole memory dumped, so paging or
     * filter is not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }
    if (has_detach) {
        detach_p = detach;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = &dump_state_global;
    dump_state_prepare(s);

    dump_init(s, fd, has_format, format, paging, has_begin,
              begin, length, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        atomic_set(&s->status, DUMP_STATUS_FAILED);
        return;
    }

    if (detach_p) {
        /* detached dump */
        s->detached = true;
        qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
                           s, QEMU_THREAD_DETACHED);
    } else {
        /* sync dump */
        dump_process(s, errp);
    }
}

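/*
 * Example QMP usage of the command above (a sketch; exact member spelling is
 * defined by the QAPI schema):
 *
 * -> { "execute": "dump-guest-memory",
 *      "arguments": { "paging": false, "protocol": "file:/tmp/vmcore",
 *                     "format": "kdump-zlib", "detach": true } }
 * <- { "return": {} }
 * <- { "event": "DUMP_COMPLETED", "data": { "result": { ... } } }
 */
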
DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryFormatList *item;
    DumpGuestMemoryCapability *cap =
                                  g_malloc0(sizeof(DumpGuestMemoryCapability));

    /* elf is always available */
    item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    cap->formats = item;
    item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;

    /* kdump-zlib is always available */
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
#endif

    return cap;
}
;