/*
 * qemu/kvm integration, x86 specific code
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
#include "config-host.h"

#include <sys/utsname.h>
#include <linux/kvm_para.h>
#include <sys/ioctl.h>

#define MSR_IA32_TSC 0x10
static struct kvm_msr_list *kvm_msr_list;
extern unsigned int kvm_shadow_memory;
static int kvm_has_msr_star;
static int kvm_has_vm_hsave_pa;

static int lm_capable_kernel;
int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr)
#ifdef KVM_CAP_SET_TSS_ADDR
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    r = kvm_vm_ioctl(kvm_state, KVM_SET_TSS_ADDR, addr);
    fprintf(stderr, "kvm_set_tss_addr: %m\n");
static int kvm_init_tss(kvm_context_t kvm)
#ifdef KVM_CAP_SET_TSS_ADDR
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    /*
     * this address is 3 pages before the bios, and the bios should present
     * as unavailable memory
     */
    r = kvm_set_tss_addr(kvm, 0xfeffd000);
    fprintf(stderr, "kvm_init_tss: unable to set tss addr\n");
static int kvm_set_identity_map_addr(kvm_context_t kvm, uint64_t addr)
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_IDENTITY_MAP_ADDR);
    r = kvm_vm_ioctl(kvm_state, KVM_SET_IDENTITY_MAP_ADDR, &addr);
    fprintf(stderr, "kvm_set_identity_map_addr: %m\n");
static int kvm_init_identity_map_page(kvm_context_t kvm)
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_IDENTITY_MAP_ADDR);
    /*
     * this address is 4 pages before the bios, and the bios should present
     * as unavailable memory
     */
    r = kvm_set_identity_map_addr(kvm, 0xfeffc000);
    fprintf(stderr, "kvm_init_identity_map_page: "
            "unable to set identity mapping addr\n");
static int kvm_create_pit(kvm_context_t kvm)
    kvm->pit_in_kernel = 0;
    if (!kvm->no_pit_creation) {
        r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_PIT);
        r = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT);
        kvm->pit_in_kernel = 1;
        fprintf(stderr, "Create kernel PIT failed\n");
int kvm_arch_create(kvm_context_t kvm, unsigned long phys_mem_bytes,
    r = kvm_init_tss(kvm);
    r = kvm_init_identity_map_page(kvm);
    r = kvm_create_pit(kvm);
    r = kvm_init_coalesced_mmio(kvm);
#ifdef KVM_EXIT_TPR_ACCESS

static int kvm_handle_tpr_access(kvm_vcpu_context_t vcpu)
    struct kvm_run *run = vcpu->run;
    kvm_tpr_access_report(cpu_single_env,
                          run->tpr_access.is_write);

int kvm_enable_vapic(kvm_vcpu_context_t vcpu, uint64_t vapic)
    struct kvm_vapic_addr va = {
    r = ioctl(vcpu->fd, KVM_SET_VAPIC_ADDR, &va);
    perror("kvm_enable_vapic");
int kvm_arch_run(kvm_vcpu_context_t vcpu)
    struct kvm_run *run = vcpu->run;
    switch (run->exit_reason) {
#ifdef KVM_EXIT_SET_TPR
    case KVM_EXIT_SET_TPR:
#ifdef KVM_EXIT_TPR_ACCESS
    case KVM_EXIT_TPR_ACCESS:
        r = kvm_handle_tpr_access(vcpu);
#define MAX_ALIAS_SLOTS 4
} kvm_aliases[MAX_ALIAS_SLOTS];

static int get_alias_slot(uint64_t start)
    for (i = 0; i < MAX_ALIAS_SLOTS; i++)
        if (kvm_aliases[i].start == start)

static int get_free_alias_slot(void)
    for (i = 0; i < MAX_ALIAS_SLOTS; i++)
        if (kvm_aliases[i].len == 0)

static void register_alias(int slot, uint64_t start, uint64_t len)
    kvm_aliases[slot].start = start;
    kvm_aliases[slot].len = len;
int kvm_create_memory_alias(kvm_context_t kvm,
                            uint64_t target_phys)
    struct kvm_memory_alias alias = {
        .guest_phys_addr = phys_start,
        .target_phys_addr = target_phys,
    slot = get_alias_slot(phys_start);
    slot = get_free_alias_slot();
    r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ALIAS, &alias);
    register_alias(slot, phys_start, len);

int kvm_destroy_memory_alias(kvm_context_t kvm, uint64_t phys_start)
    return kvm_create_memory_alias(kvm, phys_start, 0, 0);
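/*
 * Editor's note -- illustrative usage sketch, not part of the original file.
 * A zero-length alias removes an existing mapping, which is why
 * kvm_destroy_memory_alias() above simply re-creates the alias with len 0.
 * With hypothetical addresses:
 *
 *     kvm_create_memory_alias(kvm, 0xa0000, 0x10000, target_phys);
 *     ...
 *     kvm_destroy_memory_alias(kvm, 0xa0000);   // becomes a len == 0 alias
 */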
#ifdef KVM_CAP_IRQCHIP

int kvm_get_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s)
    if (!kvm_irqchip_in_kernel(vcpu->kvm))
    r = ioctl(vcpu->fd, KVM_GET_LAPIC, s);
    perror("kvm_get_lapic");

int kvm_set_lapic(kvm_vcpu_context_t vcpu, struct kvm_lapic_state *s)
    if (!kvm_irqchip_in_kernel(vcpu->kvm))
    r = ioctl(vcpu->fd, KVM_SET_LAPIC, s);
    perror("kvm_set_lapic");
int kvm_get_pit(kvm_context_t kvm, struct kvm_pit_state *s)
    if (!kvm->pit_in_kernel)
    return kvm_vm_ioctl(kvm_state, KVM_GET_PIT, s);

int kvm_set_pit(kvm_context_t kvm, struct kvm_pit_state *s)
    if (!kvm->pit_in_kernel)
    return kvm_vm_ioctl(kvm_state, KVM_SET_PIT, s);

#ifdef KVM_CAP_PIT_STATE2
int kvm_get_pit2(kvm_context_t kvm, struct kvm_pit_state2 *ps2)
    if (!kvm->pit_in_kernel)
    return kvm_vm_ioctl(kvm_state, KVM_GET_PIT2, ps2);

int kvm_set_pit2(kvm_context_t kvm, struct kvm_pit_state2 *ps2)
    if (!kvm->pit_in_kernel)
    return kvm_vm_ioctl(kvm_state, KVM_SET_PIT2, ps2);

int kvm_has_pit_state2(kvm_context_t kvm)
#ifdef KVM_CAP_PIT_STATE2
    r = kvm_check_extension(kvm_state, KVM_CAP_PIT_STATE2);
void kvm_show_code(kvm_vcpu_context_t vcpu)
#define SHOW_CODE_LEN 50
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    char code_str[SHOW_CODE_LEN * 3 + 1];
    kvm_context_t kvm = vcpu->kvm;
    r = ioctl(fd, KVM_GET_SREGS, &sregs);
    perror("KVM_GET_SREGS");
    r = ioctl(fd, KVM_GET_REGS, &regs);
    perror("KVM_GET_REGS");
    rip = sregs.cs.base + regs.rip;
    back_offset = regs.rip;
    if (back_offset > 20)
    for (n = -back_offset; n < SHOW_CODE_LEN-back_offset; ++n) {
        strcat(code_str, " -->");
        r = kvm_mmio_read(kvm->opaque, rip + n, &code, 1);
        strcat(code_str, " xx");
        sprintf(code_str + strlen(code_str), " %02x", code);
    fprintf(stderr, "code:%s\n", code_str);
/*
 * Returns available msr list.  User must free.
 */
struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
    struct kvm_msr_list sizer, *msrs;
    r = kvm_ioctl(kvm_state, KVM_GET_MSR_INDEX_LIST, &sizer);
    if (r < 0 && r != -E2BIG)
    /* Old kernel modules had a bug and could write beyond the provided
       memory. Allocate at least a safe amount of 1K. */
    msrs = qemu_malloc(MAX(1024, sizeof(*msrs) +
                           sizer.nmsrs * sizeof(*msrs->indices)));
    msrs->nmsrs = sizer.nmsrs;
    r = kvm_ioctl(kvm_state, KVM_GET_MSR_INDEX_LIST, msrs);
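/*
 * Editor's note -- illustrative usage sketch, not part of the original file.
 * As the comment above says, the caller owns the returned buffer; a typical
 * consumer (assuming qemu_free() as the matching release call) looks like:
 *
 *     struct kvm_msr_list *list = kvm_get_msr_list(kvm);
 *     int i;
 *
 *     if (list) {
 *         for (i = 0; i < list->nmsrs; ++i)
 *             printf("supported MSR %#x\n", list->indices[i]);
 *         qemu_free(list);
 *     }
 */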
int kvm_get_msrs(kvm_vcpu_context_t vcpu, struct kvm_msr_entry *msrs, int n)
    struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
    memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
    r = ioctl(vcpu->fd, KVM_GET_MSRS, kmsrs);
    memcpy(msrs, kmsrs->entries, n * sizeof *msrs);

int kvm_set_msrs(kvm_vcpu_context_t vcpu, struct kvm_msr_entry *msrs, int n)
    struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
    memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
    r = ioctl(vcpu->fd, KVM_SET_MSRS, kmsrs);
int kvm_get_mce_cap_supported(kvm_context_t kvm, uint64_t *mce_cap,
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MCE);
    return kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);

int kvm_setup_mce(kvm_vcpu_context_t vcpu, uint64_t *mcg_cap)
    return ioctl(vcpu->fd, KVM_X86_SETUP_MCE, mcg_cap);

int kvm_set_mce(kvm_vcpu_context_t vcpu, struct kvm_x86_mce *m)
    return ioctl(vcpu->fd, KVM_X86_SET_MCE, m);
static void print_seg(FILE *file, const char *name, struct kvm_segment *seg)
    "%s %04x (%08llx/%08x p %d dpl %d db %d s %d type %x l %d"
            name, seg->selector, seg->base, seg->limit, seg->present,
            seg->dpl, seg->db, seg->s, seg->type, seg->l, seg->g,

static void print_dt(FILE *file, const char *name, struct kvm_dtable *dt)
    fprintf(stderr, "%s %llx/%x\n", name, dt->base, dt->limit);
void kvm_show_regs(kvm_vcpu_context_t vcpu)
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    r = ioctl(fd, KVM_GET_REGS, &regs);
    perror("KVM_GET_REGS");
    "rax %016llx rbx %016llx rcx %016llx rdx %016llx\n"
    "rsi %016llx rdi %016llx rsp %016llx rbp %016llx\n"
    "r8 %016llx r9 %016llx r10 %016llx r11 %016llx\n"
    "r12 %016llx r13 %016llx r14 %016llx r15 %016llx\n"
    "rip %016llx rflags %08llx\n",
    regs.rax, regs.rbx, regs.rcx, regs.rdx,
    regs.rsi, regs.rdi, regs.rsp, regs.rbp,
    regs.r8, regs.r9, regs.r10, regs.r11,
    regs.r12, regs.r13, regs.r14, regs.r15,
    regs.rip, regs.rflags);
    r = ioctl(fd, KVM_GET_SREGS, &sregs);
    perror("KVM_GET_SREGS");
    print_seg(stderr, "cs", &sregs.cs);
    print_seg(stderr, "ds", &sregs.ds);
    print_seg(stderr, "es", &sregs.es);
    print_seg(stderr, "ss", &sregs.ss);
    print_seg(stderr, "fs", &sregs.fs);
    print_seg(stderr, "gs", &sregs.gs);
    print_seg(stderr, "tr", &sregs.tr);
    print_seg(stderr, "ldt", &sregs.ldt);
    print_dt(stderr, "gdt", &sregs.gdt);
    print_dt(stderr, "idt", &sregs.idt);
    fprintf(stderr, "cr0 %llx cr2 %llx cr3 %llx cr4 %llx cr8 %llx"
            sregs.cr0, sregs.cr2, sregs.cr3, sregs.cr4, sregs.cr8,
uint64_t kvm_get_apic_base(kvm_vcpu_context_t vcpu)
    return vcpu->run->apic_base;

void kvm_set_cr8(kvm_vcpu_context_t vcpu, uint64_t cr8)
    vcpu->run->cr8 = cr8;

__u64 kvm_get_cr8(kvm_vcpu_context_t vcpu)
    return vcpu->run->cr8;
int kvm_setup_cpuid(kvm_vcpu_context_t vcpu, int nent,
                    struct kvm_cpuid_entry *entries)
    struct kvm_cpuid *cpuid;
    cpuid = qemu_malloc(sizeof(*cpuid) + nent * sizeof(*entries));
    memcpy(cpuid->entries, entries, nent * sizeof(*entries));
    r = ioctl(vcpu->fd, KVM_SET_CPUID, cpuid);

int kvm_setup_cpuid2(kvm_vcpu_context_t vcpu, int nent,
                     struct kvm_cpuid_entry2 *entries)
    struct kvm_cpuid2 *cpuid;
    cpuid = qemu_malloc(sizeof(*cpuid) + nent * sizeof(*entries));
    memcpy(cpuid->entries, entries, nent * sizeof(*entries));
    r = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
    fprintf(stderr, "kvm_setup_cpuid2: %m\n");
int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
    r = kvm_vm_ioctl(kvm_state, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
    fprintf(stderr, "kvm_set_shadow_pages: %m\n");

int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
#ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
                  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
    *nrshadow_pages = kvm_vm_ioctl(kvm_state, KVM_GET_NR_MMU_PAGES);
static int tpr_access_reporting(kvm_vcpu_context_t vcpu, int enabled)
    struct kvm_tpr_access_ctl tac = {
    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
    r = ioctl(vcpu->fd, KVM_TPR_ACCESS_REPORTING, &tac);
    perror("KVM_TPR_ACCESS_REPORTING");

int kvm_enable_tpr_access_reporting(kvm_vcpu_context_t vcpu)
    return tpr_access_reporting(vcpu, 1);

int kvm_disable_tpr_access_reporting(kvm_vcpu_context_t vcpu)
    return tpr_access_reporting(vcpu, 0);
#ifdef KVM_CAP_EXT_CPUID

static struct kvm_cpuid2 *try_get_cpuid(kvm_context_t kvm, int max)
    struct kvm_cpuid2 *cpuid;
    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = qemu_malloc(size);
    r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max)
    fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)
    struct kvm_cpuid2 *cpuid;
    uint32_t cpuid_1_edx;
    if (!kvm_check_extension(kvm_state, KVM_CAP_EXT_CPUID)) {
    while ((cpuid = try_get_cpuid(kvm, max)) == NULL) {
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function) {
            ret = cpuid->entries[i].eax;
            ret = cpuid->entries[i].ebx;
            ret = cpuid->entries[i].ecx;
            ret = cpuid->entries[i].edx;
            /* kvm misreports the following features
             */
            ret |= 1 << 12; /* MTRR */
            ret |= 1 << 16; /* PAT */
            ret |= 1 << 7;  /* MCE */
            ret |= 1 << 14; /* MCA */
            /* On Intel, kvm returns cpuid according to
             * the Intel spec, so add missing bits
             * according to the AMD spec:
             */
            if (function == 0x80000001) {
                cpuid_1_edx = kvm_get_supported_cpuid(kvm, 1, R_EDX);
                ret |= cpuid_1_edx & 0xdfeff7ff;
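                /*
                 * Editor's note (assumption, not in the original source):
                 * the mask 0xdfeff7ff clears bits 11, 20 and 29, the
                 * positions where leaf 1 EDX (SEP, reserved, TM) and leaf
                 * 0x80000001 EDX (SYSCALL, NX, LM) mean different things,
                 * so those bits are never copied over from cpuid_1_edx.
                 */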
uint32_t kvm_get_supported_cpuid(kvm_context_t kvm, uint32_t function, int reg)

int kvm_qemu_create_memory_alias(uint64_t phys_start,
                                 uint64_t target_phys)
    return kvm_create_memory_alias(kvm_context, phys_start, len, target_phys);

int kvm_qemu_destroy_memory_alias(uint64_t phys_start)
    return kvm_destroy_memory_alias(kvm_context, phys_start);
int kvm_arch_qemu_create_context(void)
    struct utsname utsname;
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
    if (kvm_shadow_memory)
        kvm_set_shadow_pages(kvm_context, kvm_shadow_memory);
    kvm_msr_list = kvm_get_msr_list(kvm_context);
    for (i = 0; i < kvm_msr_list->nmsrs; ++i) {
        if (kvm_msr_list->indices[i] == MSR_STAR)
            kvm_has_msr_star = 1;
        if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA)
            kvm_has_vm_hsave_pa = 1;
static void set_msr_entry(struct kvm_msr_entry *entry, uint32_t index,
    entry->index = index;

/* returns 0 on success, non-0 on failure */
static int get_msr_entry(struct kvm_msr_entry *entry, CPUState *env)
    switch (entry->index) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = entry->data;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = entry->data;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = entry->data;
        env->star = entry->data;
        env->cstar = entry->data;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = entry->data;
        env->fmask = entry->data;
        env->lstar = entry->data;
        env->tsc = entry->data;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = entry->data;
    printf("Warning unknown msr index 0x%x\n", entry->index);
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
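/*
 * Editor's note -- worked example, not part of the original file. With
 * QEMU's descriptor-flag layout (type at bits 8-11, S at 12, DPL at 13-14,
 * P at 15, AVL at 20, L at 21, D/B at 22, G at 23), a typical 64-bit code
 * segment -- type 0xb, S=1, DPL=0, P=1, L=1, G=1 -- packs into
 * flags == 0x00a09b00, and set_seg()/get_seg() above convert losslessly
 * between that word and the discrete kvm_segment fields.
 */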
void kvm_arch_load_regs(CPUState *env)
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[MSR_COUNT];
    regs.rax = env->regs[R_EAX];
    regs.rbx = env->regs[R_EBX];
    regs.rcx = env->regs[R_ECX];
    regs.rdx = env->regs[R_EDX];
    regs.rsi = env->regs[R_ESI];
    regs.rdi = env->regs[R_EDI];
    regs.rsp = env->regs[R_ESP];
    regs.rbp = env->regs[R_EBP];
    regs.r8 = env->regs[8];
    regs.r9 = env->regs[9];
    regs.r10 = env->regs[10];
    regs.r11 = env->regs[11];
    regs.r12 = env->regs[12];
    regs.r13 = env->regs[13];
    regs.r14 = env->regs[14];
    regs.r15 = env->regs[15];
    regs.rflags = env->eflags;
    kvm_set_regs(env->kvm_cpu_state.vcpu_ctx, &regs);
    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;
    kvm_set_fpu(env->kvm_cpu_state.vcpu_ctx, &fpu);
    memcpy(sregs.interrupt_bitmap, env->interrupt_bitmap, sizeof(sregs.interrupt_bitmap));
    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
.tr
, &env
->tr
);
973 set_seg(&sregs
.ldt
, &env
->ldt
);
975 sregs
.idt
.limit
= env
->idt
.limit
;
976 sregs
.idt
.base
= env
->idt
.base
;
977 sregs
.gdt
.limit
= env
->gdt
.limit
;
978 sregs
.gdt
.base
= env
->gdt
.base
;
980 sregs
.cr0
= env
->cr
[0];
981 sregs
.cr2
= env
->cr
[2];
982 sregs
.cr3
= env
->cr
[3];
983 sregs
.cr4
= env
->cr
[4];
985 sregs
.cr8
= cpu_get_apic_tpr(env
);
986 sregs
.apic_base
= cpu_get_apic_base(env
);
988 sregs
.efer
= env
->efer
;
990 kvm_set_sregs(env
->kvm_cpu_state
.vcpu_ctx
, &sregs
);
994 set_msr_entry(&msrs
[n
++], MSR_IA32_SYSENTER_CS
, env
->sysenter_cs
);
995 set_msr_entry(&msrs
[n
++], MSR_IA32_SYSENTER_ESP
, env
->sysenter_esp
);
996 set_msr_entry(&msrs
[n
++], MSR_IA32_SYSENTER_EIP
, env
->sysenter_eip
);
997 if (kvm_has_msr_star
)
998 set_msr_entry(&msrs
[n
++], MSR_STAR
, env
->star
);
999 if (kvm_has_vm_hsave_pa
)
1000 set_msr_entry(&msrs
[n
++], MSR_VM_HSAVE_PA
, env
->vm_hsave
);
1001 #ifdef TARGET_X86_64
1002 if (lm_capable_kernel
) {
1003 set_msr_entry(&msrs
[n
++], MSR_CSTAR
, env
->cstar
);
1004 set_msr_entry(&msrs
[n
++], MSR_KERNELGSBASE
, env
->kernelgsbase
);
1005 set_msr_entry(&msrs
[n
++], MSR_FMASK
, env
->fmask
);
1006 set_msr_entry(&msrs
[n
++], MSR_LSTAR
, env
->lstar
);
1010 rc
= kvm_set_msrs(env
->kvm_cpu_state
.vcpu_ctx
, msrs
, n
);
1012 perror("kvm_set_msrs FAILED");
void kvm_load_tsc(CPUState *env)
    struct kvm_msr_entry msr;
    set_msr_entry(&msr, MSR_IA32_TSC, env->tsc);
    rc = kvm_set_msrs(env->kvm_cpu_state.vcpu_ctx, &msr, 1);
    perror("kvm_set_tsc FAILED.\n");
void kvm_arch_save_mpstate(CPUState *env)
#ifdef KVM_CAP_MP_STATE
    struct kvm_mp_state mp_state;
    r = kvm_get_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
    env->mp_state = mp_state.mp_state;

void kvm_arch_load_mpstate(CPUState *env)
#ifdef KVM_CAP_MP_STATE
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
    /*
     * -1 indicates that the host did not support GET_MP_STATE ioctl,
     * so don't touch it.
     */
    if (env->mp_state != -1)
        kvm_set_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
void kvm_arch_save_regs(CPUState *env)
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[MSR_COUNT];
    kvm_get_regs(env->kvm_cpu_state.vcpu_ctx, &regs);
    env->regs[R_EAX] = regs.rax;
    env->regs[R_EBX] = regs.rbx;
    env->regs[R_ECX] = regs.rcx;
    env->regs[R_EDX] = regs.rdx;
    env->regs[R_ESI] = regs.rsi;
    env->regs[R_EDI] = regs.rdi;
    env->regs[R_ESP] = regs.rsp;
    env->regs[R_EBP] = regs.rbp;
#ifdef TARGET_X86_64
    env->regs[8] = regs.r8;
    env->regs[9] = regs.r9;
    env->regs[10] = regs.r10;
    env->regs[11] = regs.r11;
    env->regs[12] = regs.r12;
    env->regs[13] = regs.r13;
    env->regs[14] = regs.r14;
    env->regs[15] = regs.r15;
    env->eflags = regs.rflags;
    env->eip = regs.rip;
    kvm_get_fpu(env->kvm_cpu_state.vcpu_ctx, &fpu);
    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;
    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);
    memcpy(env->interrupt_bitmap, sregs.interrupt_bitmap, sizeof(env->interrupt_bitmap));
    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);
    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);
    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;
    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;
    cpu_set_apic_base(env, sregs.apic_base);
    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);
#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
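    /*
     * Editor's note (descriptive comment, not in the original source): the
     * block above recomputes the derived hflags -- CPL, PE/MP/EM/TS, IOPL,
     * LMA, CS/SS size and ADDSEG -- from the sregs just read back from the
     * kernel, while HFLAG_COPY_MASK preserves every hflag bit that is not
     * derived from that state.
     */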
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star)
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
    if (kvm_has_vm_hsave_pa)
        msrs[n++].index = MSR_VM_HSAVE_PA;
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    rc = kvm_get_msrs(env->kvm_cpu_state.vcpu_ctx, msrs, n);
    perror("kvm_get_msrs FAILED");
    n = rc; /* actual number of MSRs */
    for (i = 0; i < n; i++) {
        if (get_msr_entry(&msrs[i], env))
static void do_cpuid_ent(struct kvm_cpuid_entry2 *e, uint32_t function,
                         uint32_t count, CPUState *env)
    env->regs[R_EAX] = function;
    env->regs[R_ECX] = count;
    qemu_kvm_cpuid_on_env(env);
    e->function = function;
    e->eax = env->regs[R_EAX];
    e->ebx = env->regs[R_EBX];
    e->ecx = env->regs[R_ECX];
    e->edx = env->regs[R_EDX];
struct kvm_para_features {
} para_features[] = {
#ifdef KVM_CAP_CLOCKSOURCE
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
#ifdef KVM_CAP_NOP_IO_DELAY
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
#ifdef KVM_CAP_PV_MMU
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#ifdef KVM_CAP_CR3_CACHE
    { KVM_CAP_CR3_CACHE, KVM_FEATURE_CR3_CACHE },

static int get_para_features(kvm_context_t kvm_context)
    int i, features = 0;
    for (i = 0; i < ARRAY_SIZE(para_features)-1; i++) {
        if (kvm_check_extension(kvm_state, para_features[i].cap))
            features |= (1 << para_features[i].feature);
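/*
 * Editor's note (assumption, not visible in this excerpt): the loop above
 * iterates to ARRAY_SIZE(para_features)-1 because the table is expected to
 * end with a terminator entry that must not be probed as a capability.
 */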
static void kvm_trim_features(uint32_t *features, uint32_t supported)
    for (i = 0; i < 32; ++i) {
        if ((*features & mask) && !(supported & mask)) {
*cenv
)
1261 struct kvm_cpuid_entry2 cpuid_ent
[100];
1262 #ifdef KVM_CPUID_SIGNATURE
1263 struct kvm_cpuid_entry2
*pv_ent
;
1264 uint32_t signature
[3];
1268 uint32_t i
, j
, limit
;
1270 qemu_kvm_load_lapic(cenv
);
1273 #ifdef KVM_CPUID_SIGNATURE
1274 /* Paravirtualization CPUIDs */
1275 memcpy(signature
, "KVMKVMKVM\0\0\0", 12);
1276 pv_ent
= &cpuid_ent
[cpuid_nent
++];
1277 memset(pv_ent
, 0, sizeof(*pv_ent
));
1278 pv_ent
->function
= KVM_CPUID_SIGNATURE
;
1280 pv_ent
->ebx
= signature
[0];
1281 pv_ent
->ecx
= signature
[1];
1282 pv_ent
->edx
= signature
[2];
1284 pv_ent
= &cpuid_ent
[cpuid_nent
++];
1285 memset(pv_ent
, 0, sizeof(*pv_ent
));
1286 pv_ent
->function
= KVM_CPUID_FEATURES
;
1287 pv_ent
->eax
= get_para_features(kvm_context
);
1290 kvm_trim_features(&cenv
->cpuid_features
,
1291 kvm_arch_get_supported_cpuid(cenv
, 1, R_EDX
));
1293 /* prevent the hypervisor bit from being cleared by the kernel */
1294 i
= cenv
->cpuid_ext_features
& CPUID_EXT_HYPERVISOR
;
1295 kvm_trim_features(&cenv
->cpuid_ext_features
,
1296 kvm_arch_get_supported_cpuid(cenv
, 1, R_ECX
));
1297 cenv
->cpuid_ext_features
|= i
;
1299 kvm_trim_features(&cenv
->cpuid_ext2_features
,
1300 kvm_arch_get_supported_cpuid(cenv
, 0x80000001, R_EDX
));
1301 kvm_trim_features(&cenv
->cpuid_ext3_features
,
1302 kvm_arch_get_supported_cpuid(cenv
, 0x80000001, R_ECX
));
    copy.regs[R_EAX] = 0;
    qemu_kvm_cpuid_on_env(&copy);
    limit = copy.regs[R_EAX];
    for (i = 0; i <= limit; ++i) {
        if (i == 4 || i == 0xb || i == 0xd) {
            for (j = 0; ; ++j) {
                do_cpuid_ent(&cpuid_ent[cpuid_nent], i, j, &copy);
                cpuid_ent[cpuid_nent].flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpuid_ent[cpuid_nent].index = j;
                if (i == 4 && copy.regs[R_EAX] == 0)
                if (i == 0xb && !(copy.regs[R_ECX] & 0xff00))
                if (i == 0xd && copy.regs[R_EAX] == 0)
            do_cpuid_ent(&cpuid_ent[cpuid_nent++], i, 0, &copy);
    copy.regs[R_EAX] = 0x80000000;
    qemu_kvm_cpuid_on_env(&copy);
    limit = copy.regs[R_EAX];
    for (i = 0x80000000; i <= limit; ++i)
        do_cpuid_ent(&cpuid_ent[cpuid_nent++], i, 0, &copy);
    kvm_setup_cpuid2(cenv->kvm_cpu_state.vcpu_ctx, cpuid_nent, cpuid_ent);
    if (((cenv->cpuid_version >> 8)&0xF) >= 6
        && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
        && kvm_check_extension(kvm_state, KVM_CAP_MCE) > 0) {
        if (kvm_get_mce_cap_supported(kvm_context, &mcg_cap, &banks))
            perror("kvm_get_mce_cap_supported FAILED");
        if (banks > MCE_BANKS_DEF)
            banks = MCE_BANKS_DEF;
        mcg_cap &= MCE_CAP_DEF;
        if (kvm_setup_mce(cenv->kvm_cpu_state.vcpu_ctx, &mcg_cap))
            perror("kvm_setup_mce FAILED");
        cenv->mcg_cap = mcg_cap;
int kvm_arch_halt(void *opaque, kvm_vcpu_context_t vcpu)
    CPUState *env = cpu_single_env;
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
    if (!kvm_irqchip_in_kernel(kvm_context))
        kvm_set_cr8(env->kvm_cpu_state.vcpu_ctx, cpu_get_apic_tpr(env));

void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
    cpu_single_env = env;
    env->eflags = kvm_get_interrupt_flag(env->kvm_cpu_state.vcpu_ctx)
        ? env->eflags | IF_MASK : env->eflags & ~IF_MASK;
    cpu_set_apic_tpr(env, kvm_get_cr8(env->kvm_cpu_state.vcpu_ctx));
    cpu_set_apic_base(env, kvm_get_apic_base(env->kvm_cpu_state.vcpu_ctx));
int kvm_arch_has_work(CPUState *env)
    if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (env->interrupt_request & CPU_INTERRUPT_NMI))
int kvm_arch_try_push_interrupts(void *opaque)
    CPUState *env = cpu_single_env;
    if (kvm_is_ready_for_interrupt_injection(env->kvm_cpu_state.vcpu_ctx) &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        r = kvm_inject_irq(env->kvm_cpu_state.vcpu_ctx, irq);
        printf("cpu %d fail inject %x\n", env->cpu_index, irq);
    return (env->interrupt_request & CPU_INTERRUPT_HARD) != 0;
#ifdef KVM_CAP_USER_NMI
void kvm_arch_push_nmi(void *opaque)
    CPUState *env = cpu_single_env;
    if (likely(!(env->interrupt_request & CPU_INTERRUPT_NMI)))
    env->interrupt_request &= ~CPU_INTERRUPT_NMI;
    r = kvm_inject_nmi(env->kvm_cpu_state.vcpu_ctx);
    printf("cpu %d fail inject NMI\n", env->cpu_index);
#endif /* KVM_CAP_USER_NMI */
void kvm_arch_update_regs_for_sipi(CPUState *env)
    SegmentCache cs = env->segs[R_CS];
    kvm_arch_save_regs(env);
    env->segs[R_CS] = cs;
    kvm_arch_load_regs(env);

void kvm_arch_cpu_reset(CPUState *env)
    kvm_arch_load_regs(env);
    if (!cpu_is_bsp(env)) {
        if (kvm_irqchip_in_kernel(kvm_context)) {
#ifdef KVM_CAP_MP_STATE
            kvm_reset_mpstate(env->kvm_cpu_state.vcpu_ctx);
            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
    uint8_t int3 = 0xcc;
    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, &int3, 1, 1))

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
1491 static int nb_hw_breakpoint
;
1493 static int find_hw_breakpoint(target_ulong addr
, int len
, int type
)
1497 for (n
= 0; n
< nb_hw_breakpoint
; n
++)
1498 if (hw_breakpoint
[n
].addr
== addr
&& hw_breakpoint
[n
].type
== type
&&
1499 (hw_breakpoint
[n
].len
== len
|| len
== -1))
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
    case GDB_BREAKPOINT_HW:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        if (addr & (len - 1))
    if (nb_hw_breakpoint == 4)
    if (find_hw_breakpoint(addr, len, type) >= 0)
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

void kvm_arch_remove_all_hw_breakpoints(void)
    nb_hw_breakpoint = 0;

static CPUWatchpoint hw_watchpoint;
int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
    kvm_update_guest_debug(cpu_single_env,
                           (arch_info->exception == 1) ?
                           KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
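            /*
             * Editor's note (descriptive comment, not in the original
             * source): debugreg[7] is the DR7 image -- (2 << (n * 2)) sets
             * the global-enable bit for breakpoint n, the R/W type field
             * sits at bits 16-17 + n*4 and the length field at bits
             * 18-19 + n*4, while the initial 0x0600 sets GE (bit 9) and the
             * reserved always-one bit 10.
             */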
void kvm_arch_do_ioperm(void *_data)
    struct ioperm_data *data = _data;
    ioperm(data->start_port, data->num, data->turn_on);
/*
 * Setup x86 specific IRQ routing
 */
int kvm_arch_init_irq_routing(void)
    if (kvm_irqchip && kvm_has_gsi_routing(kvm_context)) {
        kvm_clear_gsi_routes(kvm_context);
        for (i = 0; i < 8; ++i) {
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_PIC_MASTER, i);
        for (i = 8; i < 16; ++i) {
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_PIC_SLAVE, i - 8);
        for (i = 0; i < 24; ++i) {
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_IOAPIC, 2);
            } else if (i != 2) {
                r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_IOAPIC, i);
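            /*
             * Editor's note (descriptive comment, not in the original
             * source): ISA IRQ 0 is delivered to IOAPIC pin 2 and IRQ 2
             * itself gets no IOAPIC route, matching the conventional PC
             * timer-interrupt override.
             */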
        kvm_commit_irq_routes(kvm_context);
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
    return kvm_get_supported_cpuid(kvm_context, function, reg);

void kvm_arch_process_irqchip_events(CPUState *env)
    kvm_arch_save_regs(env);
    if (env->interrupt_request & CPU_INTERRUPT_INIT)
    if (env->interrupt_request & CPU_INTERRUPT_SIPI)
    kvm_arch_load_regs(env);