/*
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "elf.h"
#include "exec/cpu-all.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qmp-commands.h"
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}
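
/*
 * Usage sketch (illustrative, not a call site in this file): the helpers
 * above reorder bytes only when the requested ELF data encoding differs from
 * the host.  A LOAD segment type for a little-endian target would be built as
 *
 *     Elf64_Word p_type = cpu_convert_to_target32(PT_LOAD, ELFDATA2LSB);
 *
 * On a big-endian host the bytes are swapped; on a little-endian host the
 * value comes back unchanged.
 */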
typedef struct DumpState {
    ArchDumpInfo dump_info;
    MemoryMappingList list;
    uint16_t phdr_num;
    uint32_t sh_info;
    bool have_section;
    ssize_t note_size;
    hwaddr memory_offset;
    int fd;

    RAMBlock *block;
    ram_addr_t start;
    bool has_filter;
    int64_t begin;
    int64_t length;
    Error **errp;
} DumpState;
static int dump_cleanup(DumpState *s)
{
    memory_mapping_list_free(&s->list);

    return 0;
}

static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}
static int fd_write_vmcore(void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}
static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}
static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}
static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset)
{
    Elf64_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target64(offset, endian);
    phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);

    /* When the memory is not stored into vmcore, offset will be -1 */
    if (offset == -1) {
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);

    /* When the memory is not stored into vmcore, offset will be -1 */
    if (offset == -1) {
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    int endian = s->dump_info.d_endian;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target64(begin, endian);
    phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}
static int write_elf64_notes(DumpState *s)
{
    CPUState *cpu;
    int ret;
    int id;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(fd_write_vmcore, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        ret = cpu_write_elf64_qemunote(fd_write_vmcore, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}
static int write_elf32_note(DumpState *s)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int endian = s->dump_info.d_endian;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target32(begin, endian);
    phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
static int write_elf32_notes(DumpState *s)
{
    CPUState *cpu;
    int ret;
    int id;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(fd_write_vmcore, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        ret = cpu_write_elf32_qemunote(fd_write_vmcore, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}
static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int endian = s->dump_info.d_endian;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr64;
    }

    /* write the selected section header itself, not the pointer to it */
    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}
static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}
/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
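
/*
 * Worked example (assumed numbers): with TARGET_PAGE_SIZE = 4096 and
 * size = 10240, the loop above issues two full-page writes (i = 0, 1) and the
 * trailing branch writes the remaining 10240 % 4096 = 2048 bytes starting at
 * block->host + start + 2 * 4096.
 */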
/* get the memory's offset in the vmcore */
static hwaddr get_offset(hwaddr phys_addr, DumpState *s)
{
    RAMBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return -1;
        }
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }

            size_in_block = block->length - (start - block->offset);
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                                 (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        offset += size_in_block;
    }

    return -1;
}
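
/*
 * Worked example (assumed numbers): without a filter and with two RAM blocks
 * of 0x1000 bytes at offsets 0 and 0x1000, a physical address of 0x1800
 * misses the first block, 'offset' advances by 0x1000, the second block
 * matches, and the function returns
 * s->memory_offset + 0x1000 + (0x1800 - 0x1000), i.e. the byte position of
 * that guest page inside the vmcore file.
 */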
static int write_elf_loads(DumpState *s)
{
    hwaddr offset;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    int ret;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        offset = get_offset(memory_mapping->phys_addr, s);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}
/* write elf header, PT_NOTE and elf note to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   elf header
     *   PT_NOTE program header
     *   PT_LOAD program headers
     *   section header (only if the mapping count overflows e_phnum)
     *   elf notes
     *   memory
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(s) < 0) {
            return -1;
        }
    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(s) < 0) {
            return -1;
        }
    }

    return 0;
}
/* the dump has finished: release resources */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);

    return 0;
}
static int get_next_block(DumpState *s, RAMBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->block = block;
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->offset) {
                s->start = s->begin - block->offset;
            }
        }

        return 0;
    }
}
/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    RAMBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->block;

        size = block->length;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->offset + block->length) {
                size -= block->offset + block->length - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret < 0) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}
static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}
static ram_addr_t get_start_block(DumpState *s)
{
    RAMBlock *block;

    if (!s->has_filter) {
        s->block = QTAILQ_FIRST(&ram_list.blocks);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset >= s->begin + s->length ||
            block->offset + block->length <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->block = block;
        if (s->begin > block->offset) {
            s->start = s->begin - block->offset;
        } else {
            s->start = 0;
        }

        return s->start;
    }

    return -1;
}
static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
    }

    s->errp = errp;
    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;
    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        return -1;
    }

    /*
     * get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     *
     * If we use KVM, we should synchronize the registers before we get dump
     * info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        nr_cpus++;
    }

    ret = cpu_get_dump_info(&s->dump_info);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        return -1;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        return -1;
    }

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return -1;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list);
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;
}
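
/*
 * Note (illustrative numbers): if the guest produced, say, 70000 memory
 * mappings, the real program header count would not fit in the 16-bit
 * e_phnum field, so dump_init() stores PN_XNUM (0xffff) there and records the
 * actual count (the mappings plus the PT_NOTE entry) in the sh_info field of
 * the single section header written by write_elf_section().
 */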
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc(sizeof(DumpState));

    ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
        error_set(errp, QERR_IO_ERROR);
    }

    g_free(s);
}
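
/*
 * Usage sketch (assumed invocation, not part of this file): the QMP command
 * that lands in qmp_dump_guest_memory() can be driven like this:
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore" } }
 *
 * or, dumping only a 4 MiB window through a previously passed file
 * descriptor (the name "dumpfd" is a placeholder):
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "fd:dumpfd",
 *                    "begin": 0, "length": 4194304 } }
 */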