/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "host-utils.h"
#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12
#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};
static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_deadline;
static bool has_msr_async_pf_en;
static bool has_msr_misc_enable;
static int lm_capable_kernel;
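/* Fetch KVM's supported-CPUID table into a caller-sized buffer; returns
 * NULL when 'max' entries are not enough so the caller can retry with a
 * larger buffer. */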
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
static struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
    { -1, -1 }
};
static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}
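/* Return the bits of the requested CPUID leaf/register that KVM can
 * virtualize, applying workarounds for older kernels. */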
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    int has_kvm_features = 0;

    max = 1;
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            if (cpuid->entries[i].function == KVM_CPUID_FEATURES) {
                has_kvm_features = 1;
            }
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    g_free(cpuid);

    /* fallback for older kernels */
    if (!has_kvm_features && (function == KVM_CPUID_FEATURES)) {
        ret = get_para_features(s);
    }

    return ret;
}
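/* Pages that received a hardware memory error are tracked on this list
 * so they can be remapped (unpoisoned) again on VM reset. */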
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);
static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}
static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }

    page = g_malloc(sizeof(HWPoisonPage));
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}
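/* Build and inject a machine check for an action-required (AR) or
 * action-optional (AO) memory error, broadcasting it when the CPU model
 * supports MCA broadcast. */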
static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code)
{
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }
    cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc,
                       cpu_x86_support_mca_broadcast(env) ?
                       MCE_INJECT_BROADCAST : 0);
}
static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}
int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    ram_addr_t ram_addr;
    target_phys_addr_t paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(env, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}
int kvm_arch_on_sigbus(int code, void *addr)
{
    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        target_phys_addr_t paddr;

        /* Hope we are lucky for AO MCE */
        if (qemu_ram_addr_from_host(addr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(first_cpu, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}
static int kvm_inject_mce_oldstyle(CPUState *env)
{
    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, &mce);
    }
    return 0;
}
static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUState *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}
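/* Assemble the guest CPUID table: paravirtual leaves first, then the
 * standard, extended, and Centaur ranges, each filtered through
 * kvm_arch_get_supported_cpuid(). Also sets up MCE support and the
 * requested TSC frequency. */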
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } QEMU_PACKED cpuid_data;
    KVMState *s = env->kvm_state;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int r;

    env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features  &= kvm_arch_get_supported_cpuid(s, 0x8000000A,
                                                             0, R_EDX);

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features &
        kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);

    has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        env->cpuid_ext4_features &=
            kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks > MCE_BANKS_DEF) {
            banks = MCE_BANKS_DEF;
        }
        mcg_cap &= MCE_CAP_DEF;
        mcg_cap |= banks;
        ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }

        env->mcg_cap = mcg_cap;
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    r = kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        return r;
    }

    r = kvm_check_extension(env->kvm_state, KVM_CAP_TSC_CONTROL);
    if (r && env->tsc_khz) {
        r = kvm_vcpu_ioctl(env, KVM_SET_TSC_KHZ, env->tsc_khz);
        if (r < 0) {
            fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
            return r;
        }
    }

    return 0;
}
void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}
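/* Query the kernel's MSR index list once and record which optional MSRs
 * (STAR, VM_HSAVE_PA, TSC_DEADLINE, MISC_ENABLE) it can save/restore. */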
static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM. These are the MSRs that we must
         * save/restore. */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                    has_msr_tsc_deadline = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                    has_msr_misc_enable = true;
                    continue;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}
int kvm_arch_init(KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    int ret;
    struct utsname utsname;

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    return 0;
}
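/* Helpers translating between QEMU's SegmentCache and KVM's kvm_segment
 * representation of segment registers. */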
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}
static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    }

    return ret;
}
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
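/* Offsets, in 32-bit words, of the fields inside the kvm_xsave region
 * that kvm_put_xsave()/kvm_get_xsave() marshal below. */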
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
static int kvm_put_xsave(CPUState *env)
{
    struct kvm_xsave *xsave;
    uint16_t cwd, swd, twd;
    int i, r;

    if (!kvm_has_xsave()) {
        return kvm_put_fpu(env);
    }

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    memset(xsave, 0, sizeof(struct kvm_xsave));
    cwd = swd = twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
    xsave->region[1] = (uint32_t)(env->fpop << 16) + twd;
    memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
    memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    g_free(xsave);
    return r;
}
static int kvm_put_xcrs(CPUState *env)
{
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
}
static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}
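/* Fill in one kvm_msr_entry; kvm_put_msrs() batches all MSR writes into
 * a single KVM_SET_MSRS call built from these entries. */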
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}
static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is yet unable to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
    }
    /*
     * The following paravirtual MSRs have side effects on the guest or are
     * too heavy for normal writeback. Limit them to reset or full state
     * updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
    }
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}
static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}
static int kvm_get_xsave(CPUState *env)
{
    struct kvm_xsave *xsave;
    int ret, i;
    uint16_t cwd, swd, twd;

    if (!kvm_has_xsave()) {
        return kvm_get_fpu(env);
    }

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        g_free(xsave);
        return ret;
    }

    cwd = (uint16_t)xsave->region[0];
    swd = (uint16_t)(xsave->region[0] >> 16);
    twd = (uint16_t)xsave->region[1];
    env->fpop = (uint16_t)(xsave->region[1] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
    memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    g_free(xsave);
    return 0;
}
static int kvm_get_xcrs(CPUState *env)
{
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    }
    return 0;
}
static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env->apic_state, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env->apic_state, sregs.cr8);

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}
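/* Read back all MSRs known to be supported in one KVM_GET_MSRS call and
 * scatter the returned values into the CPUState fields. */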
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    msrs[n++].index = MSR_PAT;
    if (has_msr_star) {
        msrs[n++].index = MSR_STAR;
    }
    if (has_msr_hsave_pa) {
        msrs[n++].index = MSR_VM_HSAVE_PA;
    }
    if (has_msr_tsc_deadline) {
        msrs[n++].index = MSR_IA32_TSCDEADLINE;
    }
    if (has_msr_misc_enable) {
        msrs[n++].index = MSR_IA32_MISC_ENABLE;
    }

    if (!env->tsc_valid) {
        msrs[n++].index = MSR_IA32_TSC;
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
    if (has_msr_async_pf_en) {
        msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
    }

    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            msrs[n++].index = MSR_MC0_CTL + i;
        }
    }

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        }
    }

    return 0;
}
static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}
static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        env->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}
static int kvm_put_vcpu_events(CPUState *env, int level)
{
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
}
static int kvm_get_vcpu_events(CPUState *env)
{
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}
static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
    return ret;
}
static int kvm_put_debugregs(CPUState *env)
{
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
}
static int kvm_get_debugregs(CPUState *env)
{
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}
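/* Push QEMU's CPU state into the kernel. 'level' selects how much state
 * is written: runtime, reset, or full (migration) state. */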
int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(env, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0) {
            return ret;
        }
    }
    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
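/* Runs on the VCPU thread right before reentering the kernel: injects
 * pending NMIs and interrupts, and requests an interrupt-window exit
 * when the guest cannot accept a pending interrupt yet. */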
void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int ret;

    /* Inject NMI */
    if (env->interrupt_request & CPU_INTERRUPT_NMI) {
        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
        DPRINTF("injected NMI\n");
        ret = kvm_vcpu_ioctl(env, KVM_NMI);
        if (ret < 0) {
            fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                    strerror(-ret));
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        /* Force the VCPU out of its inner loop to process the INIT request */
        if (env->interrupt_request & CPU_INTERRUPT_INIT) {
            env->exit_request = 1;
        }

        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (env->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(env->apic_state);
    }
}
void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);
}
int kvm_arch_process_async_events(CPUState *env)
{
    if (env->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        env->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(env);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            env->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        env->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 0;
    }
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(env);
    }
    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(env);
    }

    return env->halted;
}
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;
static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}
void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
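/* Decode a KVM_EXIT_DEBUG exit: decide whether the #DB/#BP belongs to the
 * gdbstub (single step, hardware/software breakpoint, watchpoint) or has
 * to be reinjected into the guest. */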
static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
{
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        /* pass to guest */
        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return ret;
}
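/* Encode the gdbstub's breakpoints into DR0-DR3/DR7: bit pairs at the
 * bottom of DR7 enable each slot, while the R/W and LEN fields live at
 * bits 16+n*4 and 18+n*4. */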
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure can be most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        ret = kvm_handle_debug(&run->debug.arch);
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}