/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "elf.h"
#include "cpu.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-misc.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "hw/misc/vmcoreinfo.h"

#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif

#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

#define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */

#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size)   \
    ((DIV_ROUND_UP((hdr_size), 4) +                     \
      DIV_ROUND_UP((name_size), 4) +                    \
      DIV_ROUND_UP((desc_size), 4)) * 4)

uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    g_free(s->guest_note);
    s->guest_note = NULL;
    if (s->resume) {
        if (s->detached) {
            qemu_mutex_lock_iothread();
        }
        vm_start();
        if (s->detached) {
            qemu_mutex_unlock_iothread();
        }
    }

    return 0;
}

static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -errno;
    }

    return 0;
}

static void write_elf64_header(DumpState *s, Error **errp)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf32_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf64_note(DumpState *s, Error **errp)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
                             Error **errp)
{
    int ret;

    if (s->guest_note) {
        ret = f(s->guest_note, s->guest_note_size, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write guest note");
        }
    }
}

static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf32_note(DumpState *s, Error **errp)
{
    Elf32_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf_section(DumpState *s, int type, Error **errp)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write section header table");
    }
}

static void write_data(DumpState *s, void *buf, int length, Error **errp)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to save memory");
    } else {
        s->written_size += length;
    }
}

/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    int64_t i;
    Error *local_err = NULL;

    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}

static void write_elf_loads(DumpState *s, Error **errp)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    Error *local_err = NULL;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        }

        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        write_elf64_header(s, &local_err);
    } else {
        write_elf32_header(s, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        write_elf64_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 1, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf64_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } else {
        /* write PT_NOTE to vmcore */
        write_elf32_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 0, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf32_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    GuestPhysBlock *block;
    int64_t size;
    Error *local_err = NULL;

    do {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        write_memory(s, block, s->start, size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

    } while (!get_next_block(s, block));
}

static void create_vmcore(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    dump_begin(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    dump_iterate(s, errp);
}

static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    size_t written_size;
    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    size_t written_size;
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* note_buf is not enough */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}

/*
 * This function retrieves various sizes from an ELF note header.
 *
 * @note has to be a valid ELF note. The return sizes are unmodified
 * (not padded or rounded up to be multiple of 4).
 */
static void get_note_sizes(DumpState *s, const void *note,
                           uint64_t *note_head_size,
                           uint64_t *name_size,
                           uint64_t *desc_size)
{
    uint64_t note_head_sz;
    uint64_t name_sz;
    uint64_t desc_sz;

    if (s->dump_info.d_class == ELFCLASS64) {
        const Elf64_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf64_Nhdr);
        name_sz = tswap64(hdr->n_namesz);
        desc_sz = tswap64(hdr->n_descsz);
    } else {
        const Elf32_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf32_Nhdr);
        name_sz = tswap32(hdr->n_namesz);
        desc_sz = tswap32(hdr->n_descsz);
    }

    if (note_head_size) {
        *note_head_size = note_head_sz;
    }
    if (name_size) {
        *name_size = name_sz;
    }
    if (desc_size) {
        *desc_size = desc_sz;
    }
}

static bool note_name_equal(DumpState *s,
                            const uint8_t *note, const char *name)
{
    int len = strlen(name) + 1;
    uint64_t head_size, name_size;

    get_note_sizes(s, note, &head_size, &name_size, NULL);
    head_size = ROUND_UP(head_size, 4);

    return name_size == len && memcmp(note + head_size, name, len) == 0;
}

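/*
 * For example, matching the "VMCOREINFO" guest note: len == 11 (the name
 * plus its terminating NUL), head_size rounds up to 12 for both ELF classes,
 * and the comparison runs against the name bytes stored at note + 12.
 */
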
/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write common header, the version of kdump-compressed format is 6th */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                            block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write common header, the version of kdump-compressed format is 6th */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                            block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

static void write_dump_header(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    if (s->dump_info.d_class == ELFCLASS32) {
        create_header32(s, &local_err);
    } else {
        create_header64(s, &local_err);
    }
    error_propagate(errp, local_err);
}

static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}

/*
 * Set the dump_bitmap sequentially. Bits before last_pfn are not allowed to
 * be rewritten, so to set the first bit, set both last_pfn and pfn to 0.
 * set_dump_bitmap always leaves the most recently set bits un-synced; setting
 * (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into the
 * vmcore, i.e. it synchronizes the un-synced bits into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit needed to be set is not cached in buf, flush the data in buf
     * to vmcore firstly.
     * making new_offset bigger than old_offset can also sync remaining data
     * into vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}

static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}

/*
 * Examine every page and return the page frame number and the address of the
 * page. bufptr can be NULL. Note: the blocks here are supposed to reflect
 * guest-phys blocks, so block->target_start and block->target_end should be
 * integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = dump_pfn_to_paddr(s, *pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}

static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * examine memory page by page, and set the bit in dump_bitmap
     * corresponding to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to set dump_bitmap");
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-sync. Here we
     * set the remaining bits from last_pfn to the end of the bitmap buffer to
     * 0. With those set, the un-sync bit will be synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}

static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}

static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size should not be less than size, otherwise dc will never
     * have enough room
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize data in dc->buf into vmcore.
     * otherwise check if the space is enough for caching data in buf, if not,
     * write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}

static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}

/*
 * check if the page is all 0
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}

static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write page data (zero page)");
        goto out;
    }

    offset_data += s->dump_info.page_size;

    /*
     * dump memory to vmcore page by page. all zero pages are stored in the
     * first page of the page section
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        } else {
            /*
             * not a zero page, so:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get the page desc of the compressed page and write it into
             *    the cache of page_desc
             *
             * only one compression format will be used here, because
             * s->flag_compress is set. But when compression fails to work,
             * we fall back to saving in plaintext.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf,
                           s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                       (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                           (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                       (snappy_compress((char *)buf, s->dump_info.page_size,
                           (char *)buf_out, &size_out) == SNAPPY_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to saving in plaintext; size_out should be
                 * assigned the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        }
        s->written_size += s->dump_info.page_size;
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_desc");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_data");
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);

#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif

    g_free(buf_out);
}

static void create_kdump_vmcore(DumpState *s, Error **errp)
{
    int ret;
    Error *local_err = NULL;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                  :                       |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                  :                       |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write start flat header");
        return;
    }

    write_dump_header(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_bitmap(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_pages(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write end flat header");
        return;
    }
}

static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
    s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
}

static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };

static void dump_state_prepare(DumpState *s)
{
    /* zero the struct, setting status to active */
    *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
}

bool dump_in_progress(void)
{
    DumpState *state = &dump_state_global;
    return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE);
}

/* calculate the total size of memory to be dumped (taking the filter into
 * account.) */
static int64_t dump_calculate_size(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size = 0, total = 0, left = 0, right = 0;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            /* calculate the overlapped region. */
            left = MAX(s->begin, block->target_start);
            right = MIN(s->begin + s->length, block->target_end);
            size = right - left;
            size = size > 0 ? size : 0;
        } else {
            /* count the whole region in */
            size = (block->target_end - block->target_start);
        }
        total += size;
    }

    return total;
}

static void vmcoreinfo_update_phys_base(DumpState *s)
{
    uint64_t size, note_head_size, name_size, phys_base;
    char **lines;
    uint8_t *vmci;
    size_t i;

    if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        return;
    }

    get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
    note_head_size = ROUND_UP(note_head_size, 4);

    vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
    *(vmci + size) = '\0';

    lines = g_strsplit((char *)vmci, "\n", -1);
    for (i = 0; lines[i]; i++) {
        const char *prefix = NULL;

        if (s->dump_info.d_machine == EM_X86_64) {
            prefix = "NUMBER(phys_base)=";
        } else if (s->dump_info.d_machine == EM_AARCH64) {
            prefix = "NUMBER(PHYS_OFFSET)=";
        }

        if (prefix && g_str_has_prefix(lines[i], prefix)) {
            if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16,
                              &phys_base) < 0) {
                warn_report("Failed to read %s", prefix);
            } else {
                s->dump_info.phys_base = phys_base;
            }
            break;
        }
    }

    g_strfreev(lines);
}

static void dump_init(DumpState *s, int fd, bool has_format,
                      DumpGuestMemoryFormat format, bool paging, bool has_filter,
                      int64_t begin, int64_t length, Error **errp)
{
    VMCoreInfoState *vmci = vmcoreinfo_find();
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    s->has_format = has_format;
    s->format = format;
    s->written_size = 0;

    /* kdump-compressed conflicts with paging and filter */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    memory_mapping_list_init(&s->list);

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);
    s->total_size = dump_calculate_size(s);
#ifdef DEBUG_DUMP_GUEST_MEMORY
    fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
#endif

    /* it does not make sense to dump non-existent memory */
    if (!s->total_size) {
        error_setg(errp, "dump: no guest memory to dump");
        goto cleanup;
    }

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    if (!s->dump_info.page_size) {
        s->dump_info.page_size = TARGET_PAGE_SIZE;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /*
     * The goal of this block is to (a) update the previously guessed
     * phys_base, (b) copy the guest note out of the guest.
     * Failure to do so is not fatal for dumping.
     */
    if (vmci) {
        uint64_t addr, note_head_size, name_size, desc_size;
        uint32_t size;
        uint16_t format;

        note_head_size = s->dump_info.d_class == ELFCLASS32 ?
            sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr);

        format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
        size = le32_to_cpu(vmci->vmcoreinfo.size);
        addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
        if (!vmci->has_vmcoreinfo) {
            warn_report("guest note is not present");
        } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
            warn_report("guest note size is invalid: %" PRIu32, size);
        } else if (format != VMCOREINFO_FORMAT_ELF) {
            warn_report("guest note format is unsupported: %" PRIu16, format);
        } else {
            s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
            cpu_physical_memory_read(addr, s->guest_note, size);

            get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
            s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
                                               desc_size);
            if (name_size > MAX_GUEST_NOTE_SIZE ||
                desc_size > MAX_GUEST_NOTE_SIZE ||
                s->guest_note_size > size) {
                warn_report("Invalid guest note header");
                g_free(s->guest_note);
                s->guest_note = NULL;
            } else {
                vmcoreinfo_update_phys_base(s);
                s->note_size += s->guest_note_size;
            }
        }
    }

    /* get memory mapping */
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);

    uint64_t tmp;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
                       s->dump_info.page_size);
    s->len_dump_bitmap = tmp * s->dump_info.page_size;

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return;

cleanup:
    dump_cleanup(s);
}

/* this operation might be time consuming. */
static void dump_process(DumpState *s, Error **errp)
{
    Error *local_err = NULL;
    DumpQueryResult *result = NULL;

    if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        create_kdump_vmcore(s, &local_err);
    } else {
        create_vmcore(s, &local_err);
    }

    /* make sure status is written after written_size updates */
    smp_wmb();
    atomic_set(&s->status,
               (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));

    /* send DUMP_COMPLETED message (unconditionally) */
    result = qmp_query_dump(NULL);
    /* should never fail */
    assert(result);
    qapi_event_send_dump_completed(result, !!local_err, (local_err ?
                                   error_get_pretty(local_err) : NULL),
                                   &error_abort);
    qapi_free_DumpQueryResult(result);

    error_propagate(errp, local_err);
    dump_cleanup(s);
}

static void *dump_thread(void *data)
{
    DumpState *s = (DumpState *)data;
    dump_process(s, NULL);
    return NULL;
}

DumpQueryResult *qmp_query_dump(Error **errp)
{
    DumpQueryResult *result = g_new(DumpQueryResult, 1);
    DumpState *state = &dump_state_global;
    result->status = atomic_read(&state->status);
    /* make sure we are reading status and written_size in order */
    smp_rmb();
    result->completed = state->written_size;
    result->total = state->total_size;
    return result;
}

void qmp_dump_guest_memory(bool paging, const char *file,
                           bool has_detach, bool detach,
                           bool has_begin, int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    Error *local_err = NULL;
    bool detach_p = false;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Dump not allowed during incoming migration.");
        return;
    }

    /* if there is a dump in background, we should wait until the dump
     * has finished */
    if (dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    /*
     * kdump-compressed format needs the whole memory dumped, so paging or
     * filter is not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }
    if (has_detach) {
        detach_p = detach;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = &dump_state_global;
    dump_state_prepare(s);

    dump_init(s, fd, has_format, format, paging, has_begin,
              begin, length, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        atomic_set(&s->status, DUMP_STATUS_FAILED);
        return;
    }

    if (detach_p) {
        /* detached dump */
        s->detached = true;
        qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
                           s, QEMU_THREAD_DETACHED);
    } else {
        /* sync dump */
        dump_process(s, errp);
    }
}

DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryFormatList *item;
    DumpGuestMemoryCapability *cap =
                                  g_malloc0(sizeof(DumpGuestMemoryCapability));

    /* elf is always available */
    item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    cap->formats = item;
    item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;

    /* kdump-zlib is always available */
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
#endif

    return cap;
}