3 #include "kvm/interrupt.h"
4 #include "kvm/cpufeature.h"
9 #include <asm/bootparam.h>
11 #include <sys/ioctl.h>
25 * Compatibility code. Remove this when we move to tools/kvm.
27 #ifndef KVM_EXIT_INTERNAL_ERROR
28 # define KVM_EXIT_INTERNAL_ERROR 17
31 #define DEFINE_KVM_EXIT_REASON(reason) [reason] = #reason
33 const char *kvm_exit_reasons
[] = {
34 DEFINE_KVM_EXIT_REASON(KVM_EXIT_UNKNOWN
),
35 DEFINE_KVM_EXIT_REASON(KVM_EXIT_EXCEPTION
),
36 DEFINE_KVM_EXIT_REASON(KVM_EXIT_IO
),
37 DEFINE_KVM_EXIT_REASON(KVM_EXIT_HYPERCALL
),
38 DEFINE_KVM_EXIT_REASON(KVM_EXIT_DEBUG
),
39 DEFINE_KVM_EXIT_REASON(KVM_EXIT_HLT
),
40 DEFINE_KVM_EXIT_REASON(KVM_EXIT_MMIO
),
41 DEFINE_KVM_EXIT_REASON(KVM_EXIT_IRQ_WINDOW_OPEN
),
42 DEFINE_KVM_EXIT_REASON(KVM_EXIT_SHUTDOWN
),
43 DEFINE_KVM_EXIT_REASON(KVM_EXIT_FAIL_ENTRY
),
44 DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTR
),
45 DEFINE_KVM_EXIT_REASON(KVM_EXIT_SET_TPR
),
46 DEFINE_KVM_EXIT_REASON(KVM_EXIT_TPR_ACCESS
),
47 DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_SIEIC
),
48 DEFINE_KVM_EXIT_REASON(KVM_EXIT_S390_RESET
),
49 DEFINE_KVM_EXIT_REASON(KVM_EXIT_DCR
),
50 DEFINE_KVM_EXIT_REASON(KVM_EXIT_NMI
),
51 DEFINE_KVM_EXIT_REASON(KVM_EXIT_INTERNAL_ERROR
),
54 #define DEFINE_KVM_EXT(ext) \
62 { DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO
) },
63 { DEFINE_KVM_EXT(KVM_CAP_SET_TSS_ADDR
) },
64 { DEFINE_KVM_EXT(KVM_CAP_PIT2
) },
65 { DEFINE_KVM_EXT(KVM_CAP_USER_MEMORY
) },
66 { DEFINE_KVM_EXT(KVM_CAP_IRQ_ROUTING
) },
67 { DEFINE_KVM_EXT(KVM_CAP_IRQCHIP
) },
68 { DEFINE_KVM_EXT(KVM_CAP_HLT
) },
69 { DEFINE_KVM_EXT(KVM_CAP_IRQ_INJECT_STATUS
) },
70 { DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID
) },
73 static inline bool host_ptr_in_ram(struct kvm
*self
, void *p
)
75 return self
->ram_start
<= p
&& p
< (self
->ram_start
+ self
->ram_size
);
/*
 * Convert a real-mode segment:offset pair into a flat (linear) address:
 * segment is shifted left by four bits and the offset added on top.
 */
static inline uint32_t segment_to_flat(uint16_t selector, uint16_t offset)
{
	uint32_t base = (uint32_t) selector << 4;

	return base + (uint32_t) offset;
}
83 static inline void *guest_flat_to_host(struct kvm
*self
, unsigned long offset
)
85 return self
->ram_start
+ offset
;
/*
 * Translate a guest real-mode segment:offset address into a host pointer
 * by first flattening it and then mapping it into the RAM region.
 */
static inline void *guest_real_to_host(struct kvm *self, uint16_t selector, uint16_t offset)
{
	unsigned long flat_addr = segment_to_flat(selector, offset);

	return guest_flat_to_host(self, flat_addr);
}
95 static bool kvm__supports_extension(struct kvm
*self
, unsigned int extension
)
99 ret
= ioctl(self
->sys_fd
, KVM_CHECK_EXTENSION
, extension
);
106 static int kvm__check_extensions(struct kvm
*self
)
110 for (i
= 0; i
< ARRAY_SIZE(kvm_req_ext
); i
++) {
111 if (!kvm__supports_extension(self
, kvm_req_ext
[i
].code
)) {
112 error("Unsuppored KVM extension detected: %s",
113 kvm_req_ext
[i
].name
);
121 static struct kvm
*kvm__new(void)
123 struct kvm
*self
= calloc(1, sizeof *self
);
126 die("out of memory");
131 void kvm__delete(struct kvm
*self
)
133 free(self
->ram_start
);
137 static bool kvm__cpu_supports_vm(void)
139 struct cpuid_regs regs
;
142 regs
= (struct cpuid_regs
) {
148 case CPUID_VENDOR_INTEL_1
:
151 regs
= (struct cpuid_regs
) { .eax
= 1 };
153 if (regs
.ecx
& (1 << KVM__X86_FEATURE_VMX
))
156 case CPUID_VENDOR_AMD_1
:
157 regs
= (struct cpuid_regs
) { .eax
= 0x80000000 };
159 if (regs
.eax
< 0x80000001)
161 regs
= (struct cpuid_regs
) { .eax
= 0x80000001 };
163 if (regs
.ecx
& (1 << KVM__X86_FEATURE_SVM
))
171 struct kvm
*kvm__init(const char *kvm_dev
)
173 struct kvm_userspace_memory_region mem
;
174 struct kvm_pit_config pit_config
= { .flags
= 0, };
180 if (!kvm__cpu_supports_vm())
181 die("Your CPU does not support hardware virtualization");
185 self
->sys_fd
= open(kvm_dev
, O_RDWR
);
186 if (self
->sys_fd
< 0) {
188 die("'%s' not found. Please make sure you have CONFIG_KVM enabled.", kvm_dev
);
193 ret
= ioctl(self
->sys_fd
, KVM_GET_API_VERSION
, 0);
194 if (ret
!= KVM_API_VERSION
)
195 die_perror("KVM_API_VERSION ioctl");
197 self
->vm_fd
= ioctl(self
->sys_fd
, KVM_CREATE_VM
, 0);
199 die_perror("KVM_CREATE_VM ioctl");
201 if (kvm__check_extensions(self
))
202 die("A required KVM extention is not supported by OS");
204 ret
= ioctl(self
->vm_fd
, KVM_SET_TSS_ADDR
, 0xfffbd000);
206 die_perror("KVM_SET_TSS_ADDR ioctl");
208 ret
= ioctl(self
->vm_fd
, KVM_CREATE_PIT2
, &pit_config
);
210 die_perror("KVM_CREATE_PIT2 ioctl");
212 self
->ram_size
= 64UL * 1024UL * 1024UL;
214 page_size
= sysconf(_SC_PAGESIZE
);
215 if (posix_memalign(&self
->ram_start
, page_size
, self
->ram_size
) != 0)
216 die("out of memory");
218 mem
= (struct kvm_userspace_memory_region
) {
220 .guest_phys_addr
= 0x0UL
,
221 .memory_size
= self
->ram_size
,
222 .userspace_addr
= (unsigned long) self
->ram_start
,
225 ret
= ioctl(self
->vm_fd
, KVM_SET_USER_MEMORY_REGION
, &mem
);
227 die_perror("KVM_SET_USER_MEMORY_REGION ioctl");
229 ret
= ioctl(self
->vm_fd
, KVM_CREATE_IRQCHIP
);
231 die_perror("KVM_CREATE_IRQCHIP ioctl");
233 self
->vcpu_fd
= ioctl(self
->vm_fd
, KVM_CREATE_VCPU
, 0);
234 if (self
->vcpu_fd
< 0)
235 die_perror("KVM_CREATE_VCPU ioctl");
237 mmap_size
= ioctl(self
->sys_fd
, KVM_GET_VCPU_MMAP_SIZE
, 0);
239 die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
241 self
->kvm_run
= mmap(NULL
, mmap_size
, PROT_READ
|PROT_WRITE
, MAP_SHARED
, self
->vcpu_fd
, 0);
242 if (self
->kvm_run
== MAP_FAILED
)
243 die("unable to mmap vcpu fd");
248 void kvm__enable_singlestep(struct kvm
*self
)
250 struct kvm_guest_debug debug
= {
251 .control
= KVM_GUESTDBG_ENABLE
| KVM_GUESTDBG_SINGLESTEP
,
254 if (ioctl(self
->vcpu_fd
, KVM_SET_GUEST_DEBUG
, &debug
) < 0)
255 warning("KVM_SET_GUEST_DEBUG failed");
/* Real-mode segment where the kernel setup code is loaded. */
#define BOOT_LOADER_SELECTOR 0x1000
/* Instruction pointer within that segment at which loading starts. */
#define BOOT_LOADER_IP 0x0000
/* Initial real-mode stack pointer for the guest. */
#define BOOT_LOADER_SP 0x8000
/* Flat guest address where the kernel command line is placed. */
#define BOOT_CMDLINE_OFFSET 0x20000

/* Minimum x86 boot-protocol version we accept (2.02). */
#define BOOT_PROTOCOL_REQUIRED 0x202
/* loadflags bit: protected-mode kernel is loaded at 1 MB (bzImage). */
#define LOAD_HIGH 0x01
266 static int load_flat_binary(struct kvm
*self
, int fd
)
271 if (lseek(fd
, 0, SEEK_SET
) < 0)
274 p
= guest_real_to_host(self
, BOOT_LOADER_SELECTOR
, BOOT_LOADER_IP
);
276 while ((nr
= read(fd
, p
, 65536)) > 0)
279 self
->boot_selector
= BOOT_LOADER_SELECTOR
;
280 self
->boot_ip
= BOOT_LOADER_IP
;
281 self
->boot_sp
= BOOT_LOADER_SP
;
/*
 * The protected mode kernel part of a modern bzImage is loaded at 1 MB by
 * default.
 */
#define BZ_KERNEL_START 0x100000UL

/* "HdrS" signature found in the bzImage setup header. */
static const char *BZIMAGE_MAGIC = "HdrS";

/* Setup-sector count to assume when the header field is zero (boot.txt). */
#define BZ_DEFAULT_SETUP_SECTS 4
296 static bool load_bzimage(struct kvm
*self
, int fd
, const char *kernel_cmdline
)
298 struct real_intr_desc intr
;
299 struct boot_params boot
;
300 unsigned long setup_sects
;
301 unsigned int intr_addr
;
308 * See Documentation/x86/boot.txt for details no bzImage on-disk and
312 if (lseek(fd
, 0, SEEK_SET
) < 0)
315 read(fd
, &boot
, sizeof(boot
));
317 if (memcmp(&boot
.hdr
.header
, BZIMAGE_MAGIC
, strlen(BZIMAGE_MAGIC
)) != 0)
320 if (boot
.hdr
.version
< BOOT_PROTOCOL_REQUIRED
) {
321 warning("Too old kernel");
325 if (lseek(fd
, 0, SEEK_SET
) < 0)
328 if (!boot
.hdr
.setup_sects
)
329 boot
.hdr
.setup_sects
= BZ_DEFAULT_SETUP_SECTS
;
330 setup_sects
= boot
.hdr
.setup_sects
+ 1;
332 setup_size
= setup_sects
<< 9;
333 p
= guest_real_to_host(self
, BOOT_LOADER_SELECTOR
, BOOT_LOADER_IP
);
335 if (read(fd
, p
, setup_size
) != setup_size
)
338 p
= guest_flat_to_host(self
, BZ_KERNEL_START
);
340 while ((nr
= read(fd
, p
, 65536)) > 0)
343 p
= guest_flat_to_host(self
, BOOT_CMDLINE_OFFSET
);
344 if (kernel_cmdline
) {
345 cmdline_size
= strlen(kernel_cmdline
) + 1;
346 if (cmdline_size
> boot
.hdr
.cmdline_size
)
347 cmdline_size
= boot
.hdr
.cmdline_size
;
349 memset(p
, 0, boot
.hdr
.cmdline_size
);
350 memcpy(p
, kernel_cmdline
, cmdline_size
- 1);
353 #define hdr_offset(member) \
354 offsetof(struct boot_params, hdr) + \
355 offsetof(struct setup_header, member)
356 #define guest_hdr(kvm, member) \
357 guest_real_to_host(kvm, \
358 BOOT_LOADER_SELECTOR, \
361 /* some fields in guest header have to be updated */
362 p
= guest_hdr(self
, cmd_line_ptr
);
363 *(uint32_t *)p
= BOOT_CMDLINE_OFFSET
;
365 p
= guest_hdr(self
, type_of_loader
);
366 *(uint8_t *)p
= 0xff;
368 p
= guest_hdr(self
, heap_end_ptr
);
369 *(uint16_t *)p
= 0xfe00;
371 p
= guest_hdr(self
, loadflags
);
372 *(uint8_t *)p
|= CAN_USE_HEAP
;
374 self
->boot_selector
= BOOT_LOADER_SELECTOR
;
376 * The real-mode setup code starts at offset 0x200 of a bzImage. See
377 * Documentation/x86/boot.txt for details.
379 self
->boot_ip
= BOOT_LOADER_IP
+ 0x200;
380 self
->boot_sp
= BOOT_LOADER_SP
;
383 * Setup a *fake* real mode vector table, it has only
384 * one real hadler which does just iret
386 * This is where the BIOS lives -- BDA area
388 intr_addr
= BIOS_INTR_NEXT(BDA_START
+ 0, 16);
389 p
= guest_flat_to_host(self
, intr_addr
);
390 memcpy(p
, intfake
, intfake_end
- intfake
);
391 intr
= (struct real_intr_desc
) {
392 .segment
= REAL_SEGMENT(intr_addr
),
395 interrupt_table__setup(&self
->interrupt_table
, &intr
);
397 intr_addr
= BIOS_INTR_NEXT(BDA_START
+ (intfake_end
- intfake
), 16);
398 p
= guest_flat_to_host(self
, intr_addr
);
399 memcpy(p
, int10
, int10_end
- int10
);
400 intr
= (struct real_intr_desc
) {
401 .segment
= REAL_SEGMENT(intr_addr
),
404 interrupt_table__set(&self
->interrupt_table
, &intr
, 0x10);
406 p
= guest_flat_to_host(self
, 0);
407 interrupt_table__copy(&self
->interrupt_table
, p
, REAL_INTR_SIZE
);
/*
 * Load @kernel_filename into the guest, first attempting to parse it as a
 * bzImage and falling back to a raw flat binary.  Dies when the file cannot
 * be opened or matches neither format.
 *
 * Fix: the kernel file descriptor was leaked; it is now closed once the
 * image has been copied into guest memory.
 */
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
			const char *kernel_cmdline)
{
	bool ret;
	int fd;

	fd = open(kernel_filename, O_RDONLY);
	if (fd < 0)
		die("unable to open kernel");

	ret = load_bzimage(kvm, fd, kernel_cmdline);
	if (ret)
		goto found_kernel;

	ret = load_flat_binary(kvm, fd);
	if (ret)
		goto found_kernel;

	die("%s is not a valid bzImage or flat binary", kernel_filename);

found_kernel:
	close(fd);
	return ret;
}
436 static inline uint64_t ip_flat_to_real(struct kvm
*self
, uint64_t ip
)
438 uint64_t cs
= self
->sregs
.cs
.selector
;
440 return ip
- (cs
<< 4);
443 static inline bool is_in_protected_mode(struct kvm
*self
)
445 return self
->sregs
.cr0
& 0x01;
448 static inline uint64_t ip_to_flat(struct kvm
*self
, uint64_t ip
)
453 * NOTE! We should take code segment base address into account here.
454 * Luckily it's usually zero because Linux uses flat memory model.
456 if (is_in_protected_mode(self
))
459 cs
= self
->sregs
.cs
.selector
;
461 return ip
+ (cs
<< 4);
/*
 * KVM on Intel requires 'base' to be 'selector * 16' in real mode.
 */
static inline uint32_t selector_to_base(uint16_t selector)
{
	return (uint32_t) selector << 4;
}
472 static struct kvm_msrs
*kvm_msrs__new(size_t nmsrs
)
474 struct kvm_msrs
*self
= calloc(1, sizeof(*self
) + (sizeof(struct kvm_msr_entry
) * nmsrs
));
477 die("out of memory");
/* Architectural x86 MSR numbers used below (Intel SDM Vol. 4). */
#define MSR_IA32_TIME_STAMP_COUNTER 0x10

/* SYSENTER entry-point MSRs. */
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176

/* SYSCALL/SYSRET and swapgs MSRs (AMD64 range). */
#define MSR_IA32_STAR 0xc0000081
#define MSR_IA32_LSTAR 0xc0000082
#define MSR_IA32_CSTAR 0xc0000083
#define MSR_IA32_FMASK 0xc0000084
#define MSR_IA32_KERNEL_GS_BASE 0xc0000102

/* Build one kvm_msr_entry as a compound literal. */
#define KVM_MSR_ENTRY(_index, _data) \
	(struct kvm_msr_entry) { .index = _index, .data = _data }
497 static void kvm__setup_msrs(struct kvm
*self
)
499 unsigned long ndx
= 0;
501 self
->msrs
= kvm_msrs__new(100);
503 self
->msrs
->entries
[ndx
++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS
, 0x0);
504 self
->msrs
->entries
[ndx
++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP
, 0x0);
505 self
->msrs
->entries
[ndx
++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP
, 0x0);
507 self
->msrs
->entries
[ndx
++] = KVM_MSR_ENTRY(MSR_IA32_STAR
, 0x0);
508 self
->msrs
->entries
[ndx
++] = KVM_MSR_ENTRY(MSR_IA32_CSTAR
, 0x0);
509 self
->msrs
->entries
[ndx
++] = KVM_MSR_ENTRY(MSR_IA32_KERNEL_GS_BASE
, 0x0);
510 self
->msrs
->entries
[ndx
++] = KVM_MSR_ENTRY(MSR_IA32_FMASK
, 0x0);
511 self
->msrs
->entries
[ndx
++] = KVM_MSR_ENTRY(MSR_IA32_LSTAR
, 0x0);
513 self
->msrs
->entries
[ndx
++] = KVM_MSR_ENTRY(MSR_IA32_TIME_STAMP_COUNTER
, 0x0);
515 self
->msrs
->nmsrs
= ndx
;
517 if (ioctl(self
->vcpu_fd
, KVM_SET_MSRS
, self
->msrs
) < 0)
518 die_perror("KVM_SET_MSRS failed");
521 static void kvm__setup_fpu(struct kvm
*self
)
523 self
->fpu
= (struct kvm_fpu
) {
528 if (ioctl(self
->vcpu_fd
, KVM_SET_FPU
, &self
->fpu
) < 0)
529 die_perror("KVM_SET_FPU failed");
532 static void kvm__setup_regs(struct kvm
*self
)
534 self
->regs
= (struct kvm_regs
) {
535 /* We start the guest in 16-bit real mode */
536 .rflags
= 0x0000000000000002ULL
,
538 .rip
= self
->boot_ip
,
539 .rsp
= self
->boot_sp
,
540 .rbp
= self
->boot_sp
,
543 if (self
->regs
.rip
> USHRT_MAX
)
544 die("ip 0x%" PRIx64
" is too high for real mode", (uint64_t) self
->regs
.rip
);
546 if (ioctl(self
->vcpu_fd
, KVM_SET_REGS
, &self
->regs
) < 0)
547 die_perror("KVM_SET_REGS failed");
550 static void kvm__setup_sregs(struct kvm
*self
)
553 if (ioctl(self
->vcpu_fd
, KVM_GET_SREGS
, &self
->sregs
) < 0)
554 die_perror("KVM_GET_SREGS failed");
556 self
->sregs
.cs
.selector
= self
->boot_selector
;
557 self
->sregs
.cs
.base
= selector_to_base(self
->boot_selector
);
558 self
->sregs
.ss
.selector
= self
->boot_selector
;
559 self
->sregs
.ss
.base
= selector_to_base(self
->boot_selector
);
560 self
->sregs
.ds
.selector
= self
->boot_selector
;
561 self
->sregs
.ds
.base
= selector_to_base(self
->boot_selector
);
562 self
->sregs
.es
.selector
= self
->boot_selector
;
563 self
->sregs
.es
.base
= selector_to_base(self
->boot_selector
);
564 self
->sregs
.fs
.selector
= self
->boot_selector
;
565 self
->sregs
.fs
.base
= selector_to_base(self
->boot_selector
);
566 self
->sregs
.gs
.selector
= self
->boot_selector
;
567 self
->sregs
.gs
.base
= selector_to_base(self
->boot_selector
);
569 if (ioctl(self
->vcpu_fd
, KVM_SET_SREGS
, &self
->sregs
) < 0)
570 die_perror("KVM_SET_SREGS failed");
/*
 * Bring the VCPU into its boot state: segment registers, general-purpose
 * registers, FPU, and MSRs.
 */
void kvm__reset_vcpu(struct kvm *self)
{
	kvm__setup_sregs(self);

	kvm__setup_regs(self);

	kvm__setup_fpu(self);

	kvm__setup_msrs(self);
}
584 void kvm__run(struct kvm
*self
)
586 if (ioctl(self
->vcpu_fd
, KVM_RUN
, 0) < 0)
587 die_perror("KVM_RUN failed");
590 static void print_dtable(const char *name
, struct kvm_dtable
*dtable
)
592 printf(" %s %016" PRIx64
" %08" PRIx16
"\n",
593 name
, (uint64_t) dtable
->base
, (uint16_t) dtable
->limit
);
596 static void print_segment(const char *name
, struct kvm_segment
*seg
)
598 printf(" %s %04" PRIx16
" %016" PRIx64
" %08" PRIx32
" %02" PRIx8
" %x %x %x %x %x %x %x\n",
599 name
, (uint16_t) seg
->selector
, (uint64_t) seg
->base
, (uint32_t) seg
->limit
,
600 (uint8_t) seg
->type
, seg
->present
, seg
->dpl
, seg
->db
, seg
->s
, seg
->l
, seg
->g
, seg
->avl
);
603 void kvm__show_registers(struct kvm
*self
)
605 unsigned long cr0
, cr2
, cr3
;
606 unsigned long cr4
, cr8
;
607 unsigned long rax
, rbx
, rcx
;
608 unsigned long rdx
, rsi
, rdi
;
609 unsigned long rbp
, r8
, r9
;
610 unsigned long r10
, r11
, r12
;
611 unsigned long r13
, r14
, r15
;
612 unsigned long rip
, rsp
;
613 struct kvm_sregs sregs
;
614 unsigned long rflags
;
615 struct kvm_regs regs
;
618 if (ioctl(self
->vcpu_fd
, KVM_GET_REGS
, ®s
) < 0)
619 die("KVM_GET_REGS failed");
621 rflags
= regs
.rflags
;
623 rip
= regs
.rip
; rsp
= regs
.rsp
;
624 rax
= regs
.rax
; rbx
= regs
.rbx
; rcx
= regs
.rcx
;
625 rdx
= regs
.rdx
; rsi
= regs
.rsi
; rdi
= regs
.rdi
;
626 rbp
= regs
.rbp
; r8
= regs
.r8
; r9
= regs
.r9
;
627 r10
= regs
.r10
; r11
= regs
.r11
; r12
= regs
.r12
;
628 r13
= regs
.r13
; r14
= regs
.r14
; r15
= regs
.r15
;
630 printf("Registers:\n");
631 printf(" rip: %016lx rsp: %016lx flags: %016lx\n", rip
, rsp
, rflags
);
632 printf(" rax: %016lx rbx: %016lx rcx: %016lx\n", rax
, rbx
, rcx
);
633 printf(" rdx: %016lx rsi: %016lx rdi: %016lx\n", rdx
, rsi
, rdi
);
634 printf(" rbp: %016lx r8: %016lx r9: %016lx\n", rbp
, r8
, r9
);
635 printf(" r10: %016lx r11: %016lx r12: %016lx\n", r10
, r11
, r12
);
636 printf(" r13: %016lx r14: %016lx r15: %016lx\n", r13
, r14
, r15
);
638 if (ioctl(self
->vcpu_fd
, KVM_GET_SREGS
, &sregs
) < 0)
639 die("KVM_GET_REGS failed");
641 cr0
= sregs
.cr0
; cr2
= sregs
.cr2
; cr3
= sregs
.cr3
;
642 cr4
= sregs
.cr4
; cr8
= sregs
.cr8
;
644 printf(" cr0: %016lx cr2: %016lx cr3: %016lx\n", cr0
, cr2
, cr3
);
645 printf(" cr4: %016lx cr8: %016lx\n", cr4
, cr8
);
646 printf("Segment registers:\n");
647 printf(" register selector base limit type p dpl db s l g avl\n");
648 print_segment("cs ", &sregs
.cs
);
649 print_segment("ss ", &sregs
.ss
);
650 print_segment("ds ", &sregs
.ds
);
651 print_segment("es ", &sregs
.es
);
652 print_segment("fs ", &sregs
.fs
);
653 print_segment("gs ", &sregs
.gs
);
654 print_segment("tr ", &sregs
.tr
);
655 print_segment("ldt", &sregs
.ldt
);
656 print_dtable("gdt", &sregs
.gdt
);
657 print_dtable("idt", &sregs
.idt
);
658 printf(" [ efer: %016" PRIx64
" apic base: %016" PRIx64
" nmi: %s ]\n",
659 (uint64_t) sregs
.efer
, (uint64_t) sregs
.apic_base
,
660 (self
->nmi_disabled
? "disabled" : "enabled"));
661 printf("Interrupt bitmap:\n");
663 for (i
= 0; i
< (KVM_NR_INTERRUPTS
+ 63) / 64; i
++)
664 printf("%016" PRIx64
" ", (uint64_t) sregs
.interrupt_bitmap
[i
]);
668 void kvm__show_code(struct kvm
*self
)
670 unsigned int code_bytes
= 64;
671 unsigned int code_prologue
= code_bytes
* 43 / 64;
672 unsigned int code_len
= code_bytes
;
677 if (ioctl(self
->vcpu_fd
, KVM_GET_REGS
, &self
->regs
) < 0)
678 die("KVM_GET_REGS failed");
680 if (ioctl(self
->vcpu_fd
, KVM_GET_SREGS
, &self
->sregs
) < 0)
681 die("KVM_GET_SREGS failed");
683 ip
= guest_flat_to_host(self
, ip_to_flat(self
, self
->regs
.rip
) - code_prologue
);
687 for (i
= 0; i
< code_len
; i
++, ip
++) {
688 if (!host_ptr_in_ram(self
, ip
))
693 if (ip
== guest_flat_to_host(self
, ip_to_flat(self
, self
->regs
.rip
)))
694 printf("<%02x> ", c
);
702 kvm__dump_mem(self
, self
->regs
.rsp
, 32);
705 void kvm__show_page_tables(struct kvm
*self
)
712 if (!is_in_protected_mode(self
))
715 if (ioctl(self
->vcpu_fd
, KVM_GET_SREGS
, &self
->sregs
) < 0)
716 die("KVM_GET_SREGS failed");
718 pte4
= guest_flat_to_host(self
, self
->sregs
.cr3
);
719 if (!host_ptr_in_ram(self
, pte4
))
722 pte3
= guest_flat_to_host(self
, (*pte4
& ~0xfff));
723 if (!host_ptr_in_ram(self
, pte3
))
726 pte2
= guest_flat_to_host(self
, (*pte3
& ~0xfff));
727 if (!host_ptr_in_ram(self
, pte2
))
730 pte1
= guest_flat_to_host(self
, (*pte2
& ~0xfff));
731 if (!host_ptr_in_ram(self
, pte1
))
734 printf("Page Tables:\n");
735 if (*pte2
& (1 << 7))
736 printf(" pte4: %016" PRIx64
" pte3: %016" PRIx64
737 " pte2: %016" PRIx64
"\n",
738 *pte4
, *pte3
, *pte2
);
740 printf(" pte4: %016" PRIx64
" pte3: %016" PRIx64
" pte2: %016"
741 PRIx64
" pte1: %016" PRIx64
"\n",
742 *pte4
, *pte3
, *pte2
, *pte1
);
/*
 * Hex-dump @size bytes of guest memory starting at flat address @addr,
 * eight bytes per line.  The size is rounded down to a multiple of eight,
 * and the dump stops early if it would run past the end of guest RAM.
 */
void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
{
	unsigned char *p;
	unsigned long n;

	size &= ~7; /* mod 8 */

	p = guest_flat_to_host(self, addr);

	for (n = 0; n < size; n += 8) {
		if (!host_ptr_in_ram(self, p + n))
			break;

		printf(" 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n",
			addr + n, p[n + 0], p[n + 1], p[n + 2], p[n + 3],
			p[n + 4], p[n + 5], p[n + 6], p[n + 7]);
	}
}