/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "host-utils.h"

#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
#endif
#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};
static bool has_msr_star;
static bool has_msr_hsave_pa;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
static bool has_msr_async_pf_en;
#endif
static int lm_capable_kernel;
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
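/*
 * Note on the retry protocol above: KVM_GET_SUPPORTED_CPUID fails with
 * E2BIG when the supplied buffer is too small. try_get_cpuid() signals
 * that case by returning NULL, and the caller below simply doubles the
 * entry count and retries until the whole table fits.
 */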
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}
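/*
 * A rough map of the 0x183f7ff mask used above: it keeps the leaf 1 EDX
 * feature bits (FPU, VME, DE, PSE, TSC, MSR, PAE, MCE, CX8, APIC, MTRR,
 * PGE, MCA, CMOV, PAT, PSE36, MMX, FXSR) that the AMD spec defines to be
 * mirrored into leaf 0x80000001 EDX, while the AMD-specific bits there
 * (SYSCALL, NX, RDTSCP, LM, 3DNow! and friends) are left to what KVM
 * reported.
 */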
#ifdef CONFIG_KVM_PARA
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#ifdef KVM_CAP_ASYNC_PF
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
#endif
    { -1, -1 }
};

static int get_para_features(CPUState *env)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(env->kvm_state, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }
#ifdef KVM_CAP_ASYNC_PF
    has_msr_async_pf_en = features & (1 << KVM_FEATURE_ASYNC_PF);
#endif
    return features;
}
#endif /* CONFIG_KVM_PARA */
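/*
 * get_para_features() translates host-side capabilities (probed with
 * KVM_CHECK_EXTENSION) into the guest-visible KVM_FEATURE_* bits that
 * are later advertised through the KVM_CPUID_FEATURES leaf in
 * kvm_arch_init_vcpu().
 */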
#ifdef KVM_CAP_MCE
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}
static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
{
    return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap);
}

static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
{
    return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m);
}
static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n)
{
    struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
    int r;

    kmsrs->nmsrs = n;
    memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
    r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs);
    memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
    qemu_free(kmsrs);
    return r;
}
/* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */
static int kvm_mce_in_progress(CPUState *env)
{
    struct kvm_msr_entry msr_mcg_status = {
        .index = MSR_MCG_STATUS,
    };
    int r;

    r = kvm_get_msr(env, &msr_mcg_status, 1);
    if (r == -1 || r == 0) {
        fprintf(stderr, "Failed to get MCE status\n");
        exit(1);
    }
    return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
}
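/*
 * MCG_STATUS_MCIP is the architectural "machine check in progress" flag;
 * it stays set from MCE delivery until the guest handler clears it.
 * Injecting a second, non-AR error while it is set could shut the guest
 * down, which is why SRAO injections are dropped in that window below.
 */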
struct kvm_x86_mce_data
{
    CPUState *env;
    struct kvm_x86_mce *mce;
    int abort_on_error;
};

static void kvm_do_inject_x86_mce(void *_data)
{
    struct kvm_x86_mce_data *data = _data;
    int r;

    /* If there is an MCE exception being processed, ignore this SRAO MCE */
    if ((data->env->mcg_cap & MCG_SER_P) &&
        !(data->mce->status & MCI_STATUS_AR)) {
        if (kvm_mce_in_progress(data->env)) {
            return;
        }
    }

    r = kvm_set_mce(data->env, data->mce);
    if (r < 0) {
        perror("kvm_set_mce FAILED");
        if (data->abort_on_error) {
            abort();
        }
    }
}
static void kvm_inject_x86_mce_on(CPUState *env, struct kvm_x86_mce *mce,
                                  int flag)
{
    struct kvm_x86_mce_data data = {
        .env = env,
        .mce = mce,
        .abort_on_error = (flag & ABORT_ON_ERROR),
    };

    if (!env->mcg_cap) {
        fprintf(stderr, "MCE support is not enabled!\n");
        return;
    }

    run_on_cpu(env, kvm_do_inject_x86_mce, &data);
}
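/*
 * run_on_cpu() matters here: KVM_X86_SET_MCE must be issued from the
 * thread that owns the target VCPU, so the actual ioctl is deferred to
 * kvm_do_inject_x86_mce() running in that VCPU's context.
 */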
static void kvm_mce_broadcast_rest(CPUState *env)
{
    struct kvm_x86_mce mce = {
        .bank = 1,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
        .addr = 0,
        .misc = 0,
    };
    CPUState *cenv;

    /* Broadcast MCA signal for processor version 06H_EH and above */
    if (cpu_x86_support_mca_broadcast(env)) {
        for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
            if (cenv == env) {
                continue;
            }
            kvm_inject_x86_mce_on(cenv, &mce, ABORT_ON_ERROR);
        }
    }
}
static void kvm_mce_inj_srar_dataload(CPUState *env, target_phys_addr_t paddr)
{
    struct kvm_x86_mce mce = {
        .bank = 9,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                  | MCI_STATUS_AR | 0x134,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV,
        .addr = paddr,
        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
    };
    int r;

    r = kvm_set_mce(env, &mce);
    if (r < 0) {
        fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
        abort();
    }
    kvm_mce_broadcast_rest(env);
}
static void kvm_mce_inj_srao_memscrub(CPUState *env, target_phys_addr_t paddr)
{
    struct kvm_x86_mce mce = {
        .bank = 9,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                  | 0xc0,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
        .addr = paddr,
        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
    };
    int r;

    r = kvm_set_mce(env, &mce);
    if (r < 0) {
        fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
        abort();
    }
    kvm_mce_broadcast_rest(env);
}
static void kvm_mce_inj_srao_memscrub2(CPUState *env, target_phys_addr_t paddr)
{
    struct kvm_x86_mce mce = {
        .bank = 9,
        .status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                  | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                  | 0xc0,
        .mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV,
        .addr = paddr,
        .misc = (MCM_ADDR_PHYS << 6) | 0xc,
    };

    kvm_inject_x86_mce_on(env, &mce, ABORT_ON_ERROR);
    kvm_mce_broadcast_rest(env);
}
#endif /* KVM_CAP_MCE */
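/*
 * A note on the synthesized MCE payloads above: the low 16 bits of
 * .status carry the architectural MCA error code -- 0x134 is the
 * "data load" SRAR code and 0xc0 the "memory scrubbing" SRAO code --
 * while .misc encodes the address mode (MCM_ADDR_PHYS) in bits 8:6 and
 * the least significant valid address bit (0xc, i.e. 4K page
 * granularity) in bits 5:0.
 */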
static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}
int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
#if defined(KVM_CAP_MCE)
    void *vaddr;
    ram_addr_t ram_addr;
    target_phys_addr_t paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR
            || code == BUS_MCEERR_AO)) {
        vaddr = (void *)addr;
        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }

        if (code == BUS_MCEERR_AR) {
            /* Fake an Intel architectural Data Load SRAR UCR */
            kvm_mce_inj_srar_dataload(env, paddr);
        } else {
            /*
             * If there is an MCE exception being processed, ignore
             * this SRAO MCE
             */
            if (!kvm_mce_in_progress(env)) {
                /* Fake an Intel architectural Memory scrubbing UCR */
                kvm_mce_inj_srao_memscrub(env, paddr);
            }
        }
    } else
#endif /* KVM_CAP_MCE */
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}
int kvm_arch_on_sigbus(int code, void *addr)
{
#if defined(KVM_CAP_MCE)
    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        void *vaddr;
        ram_addr_t ram_addr;
        target_phys_addr_t paddr;

        /* Hope we are lucky for AO MCE */
        vaddr = addr;
        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_mce_inj_srao_memscrub2(first_cpu, paddr);
    } else
#endif /* KVM_CAP_MCE */
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}
void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
                        int flag)
{
#ifdef KVM_CAP_MCE
    struct kvm_x86_mce mce = {
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
    };

    if (flag & MCE_BROADCAST) {
        kvm_mce_broadcast_rest(cenv);
    }

    kvm_inject_x86_mce_on(cenv, &mce, flag);
#else /* !KVM_CAP_MCE*/
    if (flag & ABORT_ON_ERROR) {
        abort();
    }
#endif /* !KVM_CAP_MCE*/
}
static void cpu_update_state(void *opaque, int running, int reason)
{
    CPUState *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}
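/*
 * tsc_valid gates the TSC read-back in kvm_get_msrs(): the value KVM
 * reports is only stable while the VM is stopped, so whenever the guest
 * resumes we invalidate the cached flag and force a fresh read on the
 * next synchronization.
 */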
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
#ifdef CONFIG_KVM_PARA
    uint32_t signature[3];
#endif

    env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features  &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
                                                             0, R_EDX);

    cpuid_i = 0;

#ifdef CONFIG_KVM_PARA
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features & get_para_features(env);
#endif

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    break;
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

#ifdef KVM_CAP_MCE
    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;

        if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks)) {
            perror("kvm_get_mce_cap_supported FAILED");
        } else {
            if (banks > MCE_BANKS_DEF) {
                banks = MCE_BANKS_DEF;
            }
            mcg_cap &= MCE_CAP_DEF;
            mcg_cap |= banks;
            if (kvm_setup_mce(env, &mcg_cap)) {
                perror("kvm_setup_mce FAILED");
            } else {
                env->mcg_cap = mcg_cap;
            }
        }
    }
#endif

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}
static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }

    return ret;
}
int kvm_arch_init(KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    int ret;
    struct utsname utsname;

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }
#endif
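    /*
     * Resulting layout (in the 16M BIOS case): one page of EPT identity
     * map at identity_base, followed by the three pages KVM uses for the
     * real-mode TSS -- four pages in total, matching the 0x4000 bytes
     * reserved in the e820 map below.
     */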
    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }

    return 0;
}
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}
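/*
 * One helper for both directions keeps the register list below in a
 * single place: with set != 0 the QEMU value is copied into the kvm_regs
 * field, otherwise the kernel value is copied back into CPUState.
 */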
static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
    }

    return ret;
}
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
#ifdef KVM_CAP_XSAVE
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#endif
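/*
 * These constants index into kvm_xsave.region[], an array of 32-bit
 * words overlaying the 4K XSAVE area: the legacy FXSAVE image occupies
 * bytes 0-511 (MXCSR at byte 24, x87 registers at byte 32, XMM at byte
 * 160), XSTATE_BV sits in the header at byte 512, and the AVX high
 * halves start at byte 576 -- divide each byte offset by four to get
 * the word indices above.
 */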
static int kvm_put_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    int i, r;
    struct kvm_xsave* xsave;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave()) {
        return kvm_put_fpu(env);
    }

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    memset(xsave, 0, sizeof(struct kvm_xsave));
    cwd = swd = twd = fop = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
    xsave->region[1] = (uint32_t)(fop << 16) + twd;
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
           sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
           sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
           sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    qemu_free(xsave);
    return r;
#else
    return kvm_put_fpu(env);
#endif
}
static int kvm_put_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
#else
    return 0;
#endif
}
static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}
static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    if (level == KVM_PUT_FULL_STATE) {
        /*
         * KVM is yet unable to synchronize TSC values of multiple VCPUs on
         * writeback. Until this is fixed, we only write the offset to SMP
         * guests after migration, desynchronizing the VCPUs, but avoiding
         * huge jump-backs that would occur without any writeback at all.
         */
        if (smp_cpus == 1 || env->tsc != 0) {
            kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        }
    }
    /*
     * The following paravirtual MSRs have side effects on the guest or are
     * too heavy for normal writeback. Limit them to reset or full state
     * updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
#endif
    }
#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        int i;

        if (level == KVM_PUT_RESET_STATE) {
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        } else if (level == KVM_PUT_FULL_STATE) {
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
            for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
                kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i,
                                  env->mce_banks[i]);
            }
        }
    }
#endif

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}
*env
)
1048 ret
= kvm_vcpu_ioctl(env
, KVM_GET_FPU
, &fpu
);
1053 env
->fpstt
= (fpu
.fsw
>> 11) & 7;
1054 env
->fpus
= fpu
.fsw
;
1055 env
->fpuc
= fpu
.fcw
;
1056 for (i
= 0; i
< 8; ++i
) {
1057 env
->fptags
[i
] = !((fpu
.ftwx
>> i
) & 1);
1059 memcpy(env
->fpregs
, fpu
.fpr
, sizeof env
->fpregs
);
1060 memcpy(env
->xmm_regs
, fpu
.xmm
, sizeof env
->xmm_regs
);
1061 env
->mxcsr
= fpu
.mxcsr
;
static int kvm_get_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    struct kvm_xsave* xsave;
    int ret, i;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave()) {
        return kvm_get_fpu(env);
    }

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        qemu_free(xsave);
        return ret;
    }

    cwd = (uint16_t)xsave->region[0];
    swd = (uint16_t)(xsave->region[0] >> 16);
    twd = (uint16_t)xsave->region[1];
    fop = (uint16_t)(xsave->region[1] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
           sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
           sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
           sizeof env->ymmh_regs);
    qemu_free(xsave);
    return 0;
#else
    return kvm_get_fpu(env);
#endif
}
static int kvm_get_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    }
#endif
    return 0;
}
static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env->apic_state, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env->apic_state, sregs.cr8);
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
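    /*
     * env->hflags caches state derived from the registers just read back
     * (CPL, real/protected/long mode, CS/SS operand sizes, ADDSEG).
     * Everything covered by HFLAG_COPY_MASK is recomputed from sregs
     * below; the remaining hflags bits keep their previous values.
     */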
    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (has_msr_star) {
        msrs[n++].index = MSR_STAR;
    }
    if (has_msr_hsave_pa) {
        msrs[n++].index = MSR_VM_HSAVE_PA;
    }

    if (!env->tsc_valid) {
        msrs[n++].index = MSR_IA32_TSC;
        env->tsc_valid = !vm_running;
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
    if (has_msr_async_pf_en) {
        msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
    }
#endif

#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            msrs[n++].index = MSR_MC0_CTL + i;
        }
    }
#endif

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
#ifdef KVM_CAP_MCE
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
#endif
        default:
#ifdef KVM_CAP_MCE
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
#endif
            break;
#if defined(CONFIG_KVM_PARA) && defined(KVM_CAP_ASYNC_PF)
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
#endif
        }
    }

    return 0;
}
*env
)
1348 struct kvm_mp_state mp_state
= { .mp_state
= env
->mp_state
};
1350 return kvm_vcpu_ioctl(env
, KVM_SET_MP_STATE
, &mp_state
);
1353 static int kvm_get_mp_state(CPUState
*env
)
1355 struct kvm_mp_state mp_state
;
1358 ret
= kvm_vcpu_ioctl(env
, KVM_GET_MP_STATE
, &mp_state
);
1362 env
->mp_state
= mp_state
.mp_state
;
1363 if (kvm_irqchip_in_kernel()) {
1364 env
->halted
= (mp_state
.mp_state
== KVM_MP_STATE_HALTED
);
1369 static int kvm_put_vcpu_events(CPUState
*env
, int level
)
1371 #ifdef KVM_CAP_VCPU_EVENTS
1372 struct kvm_vcpu_events events
;
1374 if (!kvm_has_vcpu_events()) {
1378 events
.exception
.injected
= (env
->exception_injected
>= 0);
1379 events
.exception
.nr
= env
->exception_injected
;
1380 events
.exception
.has_error_code
= env
->has_error_code
;
1381 events
.exception
.error_code
= env
->error_code
;
1383 events
.interrupt
.injected
= (env
->interrupt_injected
>= 0);
1384 events
.interrupt
.nr
= env
->interrupt_injected
;
1385 events
.interrupt
.soft
= env
->soft_interrupt
;
1387 events
.nmi
.injected
= env
->nmi_injected
;
1388 events
.nmi
.pending
= env
->nmi_pending
;
1389 events
.nmi
.masked
= !!(env
->hflags2
& HF2_NMI_MASK
);
1391 events
.sipi_vector
= env
->sipi_vector
;
1394 if (level
>= KVM_PUT_RESET_STATE
) {
1396 KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
;
1399 return kvm_vcpu_ioctl(env
, KVM_SET_VCPU_EVENTS
, &events
);
1405 static int kvm_get_vcpu_events(CPUState
*env
)
1407 #ifdef KVM_CAP_VCPU_EVENTS
1408 struct kvm_vcpu_events events
;
1411 if (!kvm_has_vcpu_events()) {
1415 ret
= kvm_vcpu_ioctl(env
, KVM_GET_VCPU_EVENTS
, &events
);
1419 env
->exception_injected
=
1420 events
.exception
.injected
? events
.exception
.nr
: -1;
1421 env
->has_error_code
= events
.exception
.has_error_code
;
1422 env
->error_code
= events
.exception
.error_code
;
1424 env
->interrupt_injected
=
1425 events
.interrupt
.injected
? events
.interrupt
.nr
: -1;
1426 env
->soft_interrupt
= events
.interrupt
.soft
;
1428 env
->nmi_injected
= events
.nmi
.injected
;
1429 env
->nmi_pending
= events
.nmi
.pending
;
1430 if (events
.nmi
.masked
) {
1431 env
->hflags2
|= HF2_NMI_MASK
;
1433 env
->hflags2
&= ~HF2_NMI_MASK
;
1436 env
->sipi_vector
= events
.sipi_vector
;
static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    return ret;
}
static int kvm_put_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
#else
    return 0;
#endif
}
*env
)
1498 #ifdef KVM_CAP_DEBUGREGS
1499 struct kvm_debugregs dbgregs
;
1502 if (!kvm_has_debugregs()) {
1506 ret
= kvm_vcpu_ioctl(env
, KVM_GET_DEBUGREGS
, &dbgregs
);
1510 for (i
= 0; i
< 4; i
++) {
1511 env
->dr
[i
] = dbgregs
.db
[i
];
1513 env
->dr
[4] = env
->dr
[6] = dbgregs
.dr6
;
1514 env
->dr
[5] = env
->dr
[7] = dbgregs
.dr7
;
int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(env);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(env, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0) {
            return ret;
        }
    }
    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(env);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
*env
)
1572 assert(cpu_is_stopped(env
) || qemu_cpu_is_self(env
));
1574 ret
= kvm_getput_regs(env
, 0);
1578 ret
= kvm_get_xsave(env
);
1582 ret
= kvm_get_xcrs(env
);
1586 ret
= kvm_get_sregs(env
);
1590 ret
= kvm_get_msrs(env
);
1594 ret
= kvm_get_mp_state(env
);
1598 ret
= kvm_get_vcpu_events(env
);
1602 ret
= kvm_get_debugregs(env
);
void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    int ret;

    /* Inject NMI */
    if (env->interrupt_request & CPU_INTERRUPT_NMI) {
        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
        DPRINTF("injected NMI\n");
        ret = kvm_vcpu_ioctl(env, KVM_NMI);
        if (ret < 0) {
            fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                    strerror(-ret));
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        /* Force the VCPU out of its inner loop to process the INIT request */
        if (env->interrupt_request & CPU_INTERRUPT_INIT) {
            env->exit_request = 1;
        }

        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (env->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((env->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(env->apic_state);
    }
}
void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);
}
int kvm_arch_process_async_events(CPUState *env)
{
    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI)) {
        env->halted = 0;
    }
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(env);
    }
    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(env);
    }

    return env->halted;
}
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        return 0;
    }

    return 1;
}
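/*
 * kvm_handle_halt() return semantics: 0 drops back to the main loop so
 * the halt is handled in userspace (the VCPU really sleeps), while 1
 * resumes the guest immediately because an unmasked interrupt or NMI is
 * already pending.
 */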
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    case KVM_EXIT_SET_TPR:
        ret = 1;
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure can be most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled) {
                handle = 1;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
        handle = 1;
    }
    if (!handle) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return handle;
}
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
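        /*
         * DR7 layout sketch: 0x0600 sets GE plus the always-one reserved
         * bit; per slot n the loop below then sets the global-enable bit
         * (2 << (n * 2)), the R/W type field at bit 16 + n*4, and the
         * length field at bit 18 + n*4.
         */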
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */
bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}