/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"

#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"

#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/x86.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096
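/* (Each kvm_msr_entry is 16 bytes -- a 32-bit index, 32 bits of padding
 * and a 64-bit value -- so 8 + 255 * 16 = 4088 bytes, which indeed fits
 * in one 4096-byte page.) */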
static void kvm_init_msrs(X86CPU *cpu);
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};
static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;
static int has_exception_payload;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;
int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}
bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}
bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret == KVM_CLOCK_TSC_STABLE);
}
bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}
bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}
static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}
#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })
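/* MEMORIZE() caches the result of its first evaluation in a
 * function-local static, so repeated feature probes only hit the
 * kernel once per process lifetime. */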
static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}
bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
             kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
             has_x2apic_api);
}
bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}
static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    memset(&msr_data, 0, sizeof(msr_data));
    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}
static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}
void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}
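/* env->tsc_valid is cleared again whenever the VM resumes (see
 * cpu_update_state() below), so the TSC MSR is re-read at most once
 * per stop/start cycle. */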
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        }
        fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                strerror(-r));
        exit(1);
    }
    return cpuid;
}
/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}
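/* try_get_cpuid() signals "buffer too small" by returning NULL, so the
 * loop above simply doubles 'max' until the kernel's full CPUID list
 * fits; the result is cached in cpuid_cache for later callers. */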
static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}
static bool host_tsx_blacklisted(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}
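/* Family 6 model 63 is the Haswell server part; models 60, 69 and 70
 * are the other Haswell variants. Their early steppings shipped with
 * the TSX erratum that eventually led Intel to disable TSX via
 * microcode update; model 63 was fixed in stepping 4. */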
/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        if (host_tsx_blacklisted()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_ECX) {
        if (enable_cpu_pm && has_msr_umwait) {
            ret |= CPUID_7_0_ECX_WAITPKG;
        } else {
            ret &= ~CPUID_7_0_ECX_WAITPKG;
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}
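/* Note that this function is deliberately more permissive than the raw
 * GET_SUPPORTED_CPUID data: bits are both added (features that are safe
 * to emulate regardless) and removed (features that need the in-kernel
 * irqchip or a healthy host). */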
uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;
    int i;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++)
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
            index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}
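/* Worked example for the TRUE_*_CTLS transformation: if such an MSR
 * reads 0xffffffff_00000016, then must_be_one is 0x16, can_be_one is
 * 0xffffffff, and the reported configurable mask is 0xffffffe9 --
 * everything except the three mandatory bits. */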
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);
static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}
)
560 QLIST_FOREACH(page
, &hwpoison_page_list
, list
) {
561 if (page
->ram_addr
== ram_addr
) {
565 page
= g_new(HWPoisonPage
, 1);
566 page
->ram_addr
= ram_addr
;
567 QLIST_INSERT_HEAD(&hwpoison_page_list
, page
, list
);
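/* Poisoned guest pages stay on hwpoison_page_list, recorded exactly
 * once, until the next VM reset: kvm_unpoison_all() is registered as a
 * reset handler in kvm_arch_init() below and remaps each page so it
 * becomes usable again. */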
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}
static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}
static void hardware_memory_error(void *host_addr)
{
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}
void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running. An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                "for memory used by QEMU itself instead of guest system!",
                addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE */
}
static void kvm_reset_exception(CPUX86State *env)
{
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
}
static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}
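/* With KVM_CAP_EXCEPTION_PAYLOAD the kernel applies the payload itself;
 * without it, the architectural side effect is emulated here (#DB
 * writes DR6, #PF writes CR2) before injecting the event. */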
static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        kvm_reset_exception(env);

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}
static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}
unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}
#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif
static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}
static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r;

    if (!env->tsc_khz) {
        return 0;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;
    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                       kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                       -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}
*env
)
810 return (env
->features
[FEAT_8000_0007_EDX
] & CPUID_APM_INVTSC
)
811 || env
->user_tsc_khz
;
static struct {
    const char *desc;
    struct {
        uint32_t fw;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE},
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE | HV_APIC_ACCESS_AVAILABLE},
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_APIC_ACCESS_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_HYPERCALL_AVAILABLE | HV_TIME_REF_COUNT_AVAILABLE |
             HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.fw = FEAT_HYPERV_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.fw = FEAT_HV_RECOMM_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.fw = FEAT_HYPERV_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
};
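/* 'dependencies' is a bitmap of HYPERV_FEAT_* values that must be
 * enabled first; hv_cpuid_check_and_set() below walks it with ctz64()
 * and rejects a feature whose prerequisites are missing. */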
static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        }
        fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                strerror(-r));
        exit(1);
    }
    return cpuid;
}
/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    int max = 7; /* 0x40000000..0x40000005, 0x4000000A */

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max)) == NULL) {
        max++;
    }
    return cpuid;
}
/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
 * leaves from KVM_CAP_HYPERV* and present MSRs data.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}
*cpuid
, int fw
, uint32_t *r
)
1084 struct kvm_cpuid_entry2
*entry
;
1089 case FEAT_HYPERV_EAX
:
1091 func
= HV_CPUID_FEATURES
;
1093 case FEAT_HYPERV_EDX
:
1095 func
= HV_CPUID_FEATURES
;
1097 case FEAT_HV_RECOMM_EAX
:
1099 func
= HV_CPUID_ENLIGHTMENT_INFO
;
1105 entry
= cpuid_find_entry(cpuid
, func
, 0);
1124 static int hv_cpuid_check_and_set(CPUState
*cs
, struct kvm_cpuid2
*cpuid
,
1127 X86CPU
*cpu
= X86_CPU(cs
);
1128 CPUX86State
*env
= &cpu
->env
;
1129 uint32_t r
, fw
, bits
;
1133 if (!hyperv_feat_enabled(cpu
, feature
) && !cpu
->hyperv_passthrough
) {
1137 deps
= kvm_hyperv_properties
[feature
].dependencies
;
1139 dep_feat
= ctz64(deps
);
1140 if (!(hyperv_feat_enabled(cpu
, dep_feat
))) {
1142 "Hyper-V %s requires Hyper-V %s\n",
1143 kvm_hyperv_properties
[feature
].desc
,
1144 kvm_hyperv_properties
[dep_feat
].desc
);
1147 deps
&= ~(1ull << dep_feat
);
1150 for (i
= 0; i
< ARRAY_SIZE(kvm_hyperv_properties
[feature
].flags
); i
++) {
1151 fw
= kvm_hyperv_properties
[feature
].flags
[i
].fw
;
1152 bits
= kvm_hyperv_properties
[feature
].flags
[i
].bits
;
1158 if (hv_cpuid_get_fw(cpuid
, fw
, &r
) || (r
& bits
) != bits
) {
1159 if (hyperv_feat_enabled(cpu
, feature
)) {
1161 "Hyper-V %s is not supported by kernel\n",
1162 kvm_hyperv_properties
[feature
].desc
);
1169 env
->features
[fw
] |= bits
;
1172 if (cpu
->hyperv_passthrough
) {
1173 cpu
->hyperv_features
|= BIT(feature
);
/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent in
 * case of success, errno < 0 in case of failure and 0 when no Hyper-V
 * extensions are enabled.
 */
static int hyperv_handle_properties(CPUState *cs,
                                    struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint32_t cpuid_i = 0;
    int r;

    if (!hyperv_enabled(cpu))
        return 0;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ||
        cpu->hyperv_passthrough) {
        uint16_t evmcs_version;

        r = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                (uintptr_t)&evmcs_version);

        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) && r) {
            fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
                    kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return -ENOSYS;
        }

        if (!r) {
            env->features[FEAT_HV_RECOMM_EAX] |=
                HV_ENLIGHTENED_VMCS_RECOMMENDED;
            env->features[FEAT_HV_NESTED_EAX] = evmcs_version;
        }
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
        cpuid = get_supported_hv_cpuid(cs);
    } else {
        cpuid = get_supported_hv_cpuid_legacy(cs);
    }

    if (cpu->hyperv_passthrough) {
        memcpy(cpuid_ent, &cpuid->entries[0],
               cpuid->nent * sizeof(cpuid->entries[0]));

        c = cpuid_find_entry(cpuid, HV_CPUID_FEATURES, 0);
        if (c) {
            env->features[FEAT_HYPERV_EAX] = c->eax;
            env->features[FEAT_HYPERV_EBX] = c->ebx;
            env->features[FEAT_HYPERV_EDX] = c->edx;
        }

        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
        if (c) {
            env->features[FEAT_HV_RECOMM_EAX] = c->eax;

            /* hv-spinlocks may have been overridden */
            if (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) {
                c->ebx = cpu->hyperv_spinlock_attempts;
            }
        }

        c = cpuid_find_entry(cpuid, HV_CPUID_NESTED_FEATURES, 0);
        if (c) {
            env->features[FEAT_HV_NESTED_EAX] = c->eax;
        }
    }
    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        env->features[FEAT_HV_RECOMM_EAX] |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
        if (c) {
            env->features[FEAT_HV_RECOMM_EAX] |=
                c->eax & HV_NO_NONARCH_CORESHARING;
        }
    }
    r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TIME);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_CRASH);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RESET);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VPINDEX);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RUNTIME);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_SYNIC);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_FREQUENCIES);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_REENLIGHTENMENT);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TLBFLUSH);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_EVMCS);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_IPI);
    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER_DIRECT);
    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        fprintf(stderr, "Hyper-V %s requires Hyper-V %s\n",
                kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        r |= 1;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    if (r) {
        r = -ENOSYS;
        goto free;
    }
    if (cpu->hyperv_passthrough) {
        /* We already copied all feature words from KVM as is */
        r = cpuid->nent;
        goto free;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    if (!cpu->hyperv_vendor_id) {
        memcpy(signature, "Microsoft Hv", 12);
    } else {
        size_t len = strlen(cpu->hyperv_vendor_id);

        if (len > 12) {
            error_report("hv-vendor-id truncated to 12 characters");
            len = 12;
        }
        memset(signature, 0, 12);
        memcpy(signature, cpu->hyperv_vendor_id, len);
    }
    c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
        HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
    c->eax = signature[0];
    c->ebx = 0;
    c->ecx = 0;
    c->edx = 0;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = 0x00001bbc;
    c->ebx = 0x00060001;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = env->features[FEAT_HYPERV_EAX];
    c->ebx = env->features[FEAT_HYPERV_EBX];
    c->edx = env->features[FEAT_HYPERV_EDX];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = env->features[FEAT_HV_RECOMM_EAX];
    c->ebx = cpu->hyperv_spinlock_attempts;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = 0x40;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        __u32 function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = env->features[FEAT_HV_NESTED_EAX];
    }
    r = cpuid_i;

free:
    g_free(cpuid);

    return r;
}
static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;
static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hv_passthrough_mig_blocker);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hv_no_nonarch_cs_mig_blocker);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        struct {
            struct kvm_msrs info;
            struct kvm_msr_entry entries[1];
        } msr_data = {
            .info.nmsrs = 1,
            .entries[0].index = HV_X64_MSR_VP_INDEX,
        };

        ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
        if (ret < 0) {
            return ret;
        }
        assert(ret == 1);

        if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    return 0;
}
*invtsc_mig_blocker
;
1457 #define KVM_MAX_CPUID_ENTRIES 100
int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } cpuid_data;
    /*
     * The kernel defines these structs with padding fields so there
     * should be no extra padding in our cpuid_data struct.
     */
    QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
                      sizeof(struct kvm_cpuid2) +
                      sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);

    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int max_nested_state_len;
    int r;
    Error *local_err = NULL;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;
    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        return r;
    }

    /* vcpu's TSC frequency is either specified by user, or following
     * the value used by KVM if the former is not present. In the
     * latter case, we query it from KVM and record in env->tsc_khz,
     * so that vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    /* Paravirtualization CPUIDs */
    r = hyperv_handle_properties(cs, cpuid_data.entries);
    if (r < 0) {
        return r;
    } else if (r > 0) {
        cpuid_i = r;
        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }
    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];
        c->edx = env->features[FEAT_KVM_HINTS];
    }
    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 0x1f:
            if (env->nr_dies < 2) {
                break;
            }
            /* fallthrough */
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }

                if (i == 0x1f && j == 64) {
                    break;
                }

                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0x1f && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1613 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1616 for (j
= 1; j
<= times
; ++j
) {
1617 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1618 fprintf(stderr
, "cpuid_data is full, no space for "
1619 "cpuid(eax:0x%x,ecx:0x%x)\n", i
, j
);
1622 c
= &cpuid_data
.entries
[cpuid_i
++];
1625 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1626 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1633 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1634 if (!c
->eax
&& !c
->ebx
&& !c
->ecx
&& !c
->edx
) {
1636 * KVM already returns all zeroes if a CPUID entry is missing,
1637 * so we can omit it and avoid hitting KVM's 80-entry limit.
    if (limit >= 0x0a) {
        uint32_t eax, edx;

        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);

        has_architectural_pmu_version = eax & 0xff;
        if (has_architectural_pmu_version > 0) {
            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
            }

            if (has_architectural_pmu_version > 1) {
                num_architectural_pmu_fixed_counters = edx & 0x1f;

                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
                }
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 0x8000001d:
            /* Query for all AMD cache information leaves */
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (c->eax == 0) {
                    break;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
                /*
                 * KVM already returns all zeroes if a CPUID entry is missing,
                 * so we can omit it and avoid hitting KVM's 80-entry limit.
                 */
                cpuid_i--;
            }
            break;
        }
    }
    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;
    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            if (unsupported_caps & MCG_LMCE_P) {
                error_report("kvm: LMCE not supported");
                return -ENOTSUP;
            }
            warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
                        unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);
    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    if (env->mcg_cap & MCG_LMCE_P) {
        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
    }
->user_tsc_khz
) {
1786 if ((env
->features
[FEAT_8000_0007_EDX
] & CPUID_APM_INVTSC
) &&
1787 invtsc_mig_blocker
== NULL
) {
1788 error_setg(&invtsc_mig_blocker
,
1789 "State blocked by non-migratable CPU device"
1791 r
= migrate_add_blocker(invtsc_mig_blocker
, &local_err
);
1793 error_report_err(local_err
);
1794 error_free(invtsc_mig_blocker
);
1800 if (cpu
->vmware_cpuid_freq
1801 /* Guests depend on 0x40000000 to detect this feature, so only expose
1802 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
1804 && kvm_base
== KVM_CPUID_SIGNATURE
1805 /* TSC clock must be stable and known for this feature. */
1806 && tsc_is_stable_and_known(env
)) {
1808 c
= &cpuid_data
.entries
[cpuid_i
++];
1809 c
->function
= KVM_CPUID_SIGNATURE
| 0x10;
1810 c
->eax
= env
->tsc_khz
;
1811 /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
1812 * APIC_BUS_CYCLE_NS */
1814 c
->ecx
= c
->edx
= 0;
1816 c
= cpuid_find_entry(&cpuid_data
.cpuid
, kvm_base
, 0);
1817 c
->eax
= MAX(c
->eax
, KVM_CPUID_SIGNATURE
| 0x10);
1820 cpuid_data
.cpuid
.nent
= cpuid_i
;
1822 cpuid_data
.cpuid
.padding
= 0;
1823 r
= kvm_vcpu_ioctl(cs
, KVM_SET_CPUID2
, &cpuid_data
);
1829 env
->xsave_buf
= qemu_memalign(4096, sizeof(struct kvm_xsave
));
1830 memset(env
->xsave_buf
, 0, sizeof(struct kvm_xsave
));
    max_nested_state_len = kvm_max_nested_state_length();
    if (max_nested_state_len > 0) {
        assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));

        if (cpu_has_vmx(env)) {
            struct kvm_vmx_nested_state_hdr *vmx_hdr;

            env->nested_state = g_malloc0(max_nested_state_len);
            env->nested_state->size = max_nested_state_len;
            env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;

            vmx_hdr = &env->nested_state->hdr.vmx;
            vmx_hdr->vmxon_pa = -1ull;
            vmx_hdr->vmcs12_pa = -1ull;
        }
    }

    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
        has_msr_tsc_aux = false;
    }

    kvm_init_msrs(cpu);

    r = hyperv_init_vcpu(cpu);
    if (r) {
        goto fail;
    }

    return 0;

 fail:
    migrate_del_blocker(invtsc_mig_blocker);

    return r;
}
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cpu->kvm_msr_buf) {
        g_free(cpu->kvm_msr_buf);
        cpu->kvm_msr_buf = NULL;
    }

    if (env->nested_state) {
        g_free(env->nested_state);
        env->nested_state = NULL;
    }

    return 0;
}
void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        int i;
        for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
            env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
        }

        hyperv_x86_synic_reset(cpu);
    }
    /* enabled by default */
    env->poll_control_msr = 1;
}
void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state. */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}
static int kvm_get_supported_feature_msrs(KVMState *s)
{
    int ret = 0;

    if (kvm_feature_msrs != NULL) {
        return 0;
    }

    if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
        return 0;
    }

    struct kvm_msr_list msr_list;

    msr_list.nmsrs = 0;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
    if (ret < 0 && ret != -E2BIG) {
        error_report("Fetch KVM feature MSR list failed: %s",
            strerror(-ret));
        return ret;
    }

    assert(msr_list.nmsrs > 0);
    kvm_feature_msrs = (struct kvm_msr_list *) \
        g_malloc0(sizeof(msr_list) +
                  msr_list.nmsrs * sizeof(msr_list.indices[0]));

    kvm_feature_msrs->nmsrs = msr_list.nmsrs;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);

    if (ret < 0) {
        error_report("Fetch KVM feature MSR list failed: %s",
            strerror(-ret));
        g_free(kvm_feature_msrs);
        kvm_feature_msrs = NULL;
        return ret;
    }

    return 0;
}
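/* This is the usual two-call pattern for KVM list ioctls: the first
 * call with nmsrs = 0 fails with -E2BIG but fills in the required
 * count, and the second call with a right-sized buffer fetches the
 * actual indices. */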
static int kvm_get_supported_msrs(KVMState *s)
{
    int ret = 0;
    struct kvm_msr_list msr_list, *kvm_msr_list;

    /*
     *  Obtain MSR list from KVM.  These are the MSRs that we must
     *  save/restore.
     */
    msr_list.nmsrs = 0;
    ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
    if (ret < 0 && ret != -E2BIG) {
        return ret;
    }
    /*
     * Old kernel modules had a bug and could write beyond the provided
     * memory. Allocate at least a safe amount of 1K.
     */
    kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                       msr_list.nmsrs *
                                       sizeof(msr_list.indices[0])));

    kvm_msr_list->nmsrs = msr_list.nmsrs;
    ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
    if (ret >= 0) {
        int i;

        for (i = 0; i < kvm_msr_list->nmsrs; i++) {
            switch (kvm_msr_list->indices[i]) {
            case MSR_STAR:
                has_msr_star = true;
                break;
            case MSR_VM_HSAVE_PA:
                has_msr_hsave_pa = true;
                break;
            case MSR_TSC_AUX:
                has_msr_tsc_aux = true;
                break;
            case MSR_TSC_ADJUST:
                has_msr_tsc_adjust = true;
                break;
            case MSR_IA32_TSCDEADLINE:
                has_msr_tsc_deadline = true;
                break;
            case MSR_IA32_SMBASE:
                has_msr_smbase = true;
                break;
            case MSR_SMI_COUNT:
                has_msr_smi_count = true;
                break;
            case MSR_IA32_MISC_ENABLE:
                has_msr_misc_enable = true;
                break;
            case MSR_IA32_BNDCFGS:
                has_msr_bndcfgs = true;
                break;
            case MSR_IA32_UMWAIT_CONTROL:
                has_msr_umwait = true;
                break;
            case HV_X64_MSR_CRASH_CTL:
                has_msr_hv_crash = true;
                break;
            case HV_X64_MSR_RESET:
                has_msr_hv_reset = true;
                break;
            case HV_X64_MSR_VP_INDEX:
                has_msr_hv_vpindex = true;
                break;
            case HV_X64_MSR_VP_RUNTIME:
                has_msr_hv_runtime = true;
                break;
            case HV_X64_MSR_SCONTROL:
                has_msr_hv_synic = true;
                break;
            case HV_X64_MSR_STIMER0_CONFIG:
                has_msr_hv_stimer = true;
                break;
            case HV_X64_MSR_TSC_FREQUENCY:
                has_msr_hv_frequencies = true;
                break;
            case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
                has_msr_hv_reenlightenment = true;
                break;
            case MSR_IA32_SPEC_CTRL:
                has_msr_spec_ctrl = true;
                break;
            case MSR_IA32_TSX_CTRL:
                has_msr_tsx_ctrl = true;
                break;
            case MSR_VIRT_SSBD:
                has_msr_virt_ssbd = true;
                break;
            case MSR_IA32_ARCH_CAPABILITIES:
                has_msr_arch_capabs = true;
                break;
            case MSR_IA32_CORE_CAPABILITY:
                has_msr_core_capabs = true;
                break;
            case MSR_IA32_VMX_VMFUNC:
                has_msr_vmx_vmfunc = true;
                break;
            case MSR_IA32_UCODE_REV:
                has_msr_ucode_rev = true;
                break;
            case MSR_IA32_VMX_PROCBASED_CTLS2:
                has_msr_vmx_procbased_ctls2 = true;
                break;
            }
        }
    }

    g_free(kvm_msr_list);

    return ret;
}
static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;
static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container... */
    memory_region_init(&smram_as_root, OBJECT(kvm_state),
                       "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /* ... with two regions inside: normal system memory with low
     * priority, and...
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    if (smram) {
        /* ... SMRAM with higher priority */
        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
        memory_region_set_enabled(smram, true);
    }

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1);
}
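/* KVM address space 1 is the SMM view of memory: identical to the
 * normal view except that SMRAM is only mapped there, which is why the
 * listener above is registered with as_id 1. */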
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);

    hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);

    has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
    if (has_exception_payload) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
        if (ret < 0) {
            error_report("kvm: Failed to enable exception payload cap: %s",
                         strerror(-ret));
            return ret;
        }
    }

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    kvm_get_supported_feature_msrs(s);
    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }

    if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
        object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
        x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
        smram_machine_done.notify = register_smram_listener;
        qemu_add_machine_init_done_notifier(&smram_machine_done);
    }

    if (enable_cpu_pm) {
        int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
        int ret;

/* Work around for kernel header with a typo. TODO: fix header and drop. */
#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
#endif
        if (disable_exits) {
            disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
                              KVM_X86_DISABLE_EXITS_HLT |
                              KVM_X86_DISABLE_EXITS_PAUSE |
                              KVM_X86_DISABLE_EXITS_CSTATE);
        }

        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
                                disable_exits);
        if (ret < 0) {
            error_report("kvm: guest stopping CPU not supported: %s",
                         strerror(-ret));
        }
    }

    return 0;
}
*lhs
, const SegmentCache
*rhs
)
2229 lhs
->selector
= rhs
->selector
;
2230 lhs
->base
= rhs
->base
;
2231 lhs
->limit
= rhs
->limit
;
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
    lhs->padding = 0;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}
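/* get_seg()/set_seg() convert between QEMU's packed descriptor-flags
 * word and KVM's exploded kvm_segment bitfields; multiplying by a mask
 * (e.g. rhs->s * DESC_S_MASK) is a branch-free way of setting the flag
 * when the bitfield is 1. */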
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}
static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}
*cpu
)
2329 CPUX86State
*env
= &cpu
->env
;
2333 memset(&fpu
, 0, sizeof fpu
);
2334 fpu
.fsw
= env
->fpus
& ~(7 << 11);
2335 fpu
.fsw
|= (env
->fpstt
& 7) << 11;
2336 fpu
.fcw
= env
->fpuc
;
2337 fpu
.last_opcode
= env
->fpop
;
2338 fpu
.last_ip
= env
->fpip
;
2339 fpu
.last_dp
= env
->fpdp
;
2340 for (i
= 0; i
< 8; ++i
) {
2341 fpu
.ftwx
|= (!env
->fptags
[i
]) << i
;
2343 memcpy(fpu
.fpr
, env
->fpregs
, sizeof env
->fpregs
);
2344 for (i
= 0; i
< CPU_NB_REGS
; i
++) {
2345 stq_p(&fpu
.xmm
[i
][0], env
->xmm_regs
[i
].ZMM_Q(0));
2346 stq_p(&fpu
.xmm
[i
][8], env
->xmm_regs
[i
].ZMM_Q(1));
2348 fpu
.mxcsr
= env
->mxcsr
;
2350 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_FPU
, &fpu
);
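/* KVM's ftwx uses the abridged x87 tag format (one bit per register,
 * 1 = valid), whereas env->fptags stores 1 for an empty register --
 * hence the inversion in the loop above. */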
#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#define XSAVE_BNDREGS     240
#define XSAVE_BNDCSR      256
#define XSAVE_OPMASK      272
#define XSAVE_ZMM_Hi256   288
#define XSAVE_Hi16_ZMM    416
#define XSAVE_PKRU        672

#define XSAVE_BYTE_OFFSET(word_offset) \
    ((word_offset) * sizeof_field(struct kvm_xsave, region[0]))

#define ASSERT_OFFSET(word_offset, field) \
    QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
                      offsetof(X86XSaveArea, field))
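/*
 * Compile-time checks that the word offsets used to index the kvm_xsave
 * region match the field layout of X86XSaveArea, so the two structures
 * can be exchanged with KVM wholesale.
 */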
ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->xsave_buf;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }
    x86_cpu_xsave_all_areas(cpu, xsave);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
}
static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs = {};

    if (!has_xcrs) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}
static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
static void kvm_msr_buf_reset(X86CPU *cpu)
{
    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
}
static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
{
    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];

    assert((void *)(entry + 1) <= limit);

    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
    msrs->nmsrs++;
}
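/*
 * The helpers above implement a simple batching pattern: reset the
 * per-vCPU buffer, append entries, then commit them with a single
 * KVM_SET_MSRS (or KVM_GET_MSRS) ioctl.  A typical sequence:
 *
 *     kvm_msr_buf_reset(cpu);
 *     kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
 *     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
 *
 * The ioctl returns the number of entries actually processed, which is
 * why callers below compare the return value against nmsrs.
 */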
static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
{
    kvm_msr_buf_reset(cpu);
    kvm_msr_entry_add(cpu, index, value);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}
void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
{
    int ret;

    ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
    assert(ret == 1);
}
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int ret;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}
/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    int ret;

    if (!has_msr_feature_control) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
                          cpu->env.msr_ia32_feature_control);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}
static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
{
    uint32_t default1, can_be_one, can_be_zero;
    uint32_t must_be_one;

    switch (index) {
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
        default1 = 0x00000016;
        break;
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
        default1 = 0x0401e172;
        break;
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
        default1 = 0x000011ff;
        break;
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        default1 = 0x00036dff;
        break;
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        default1 = 0;
        break;
    default:
        abort();
    }

    /*
     * If a feature bit is set, the control can be either set or clear.
     * Otherwise the value is limited to either 0 or 1 by default1.
     */
    can_be_one = features | default1;
    can_be_zero = features | ~default1;
    must_be_one = ~can_be_zero;

    /*
     * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
     * Bit 32:63 -> 1 if the control bit can be one.
     */
    return must_be_one | (((uint64_t)can_be_one) << 32);
}
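/*
 * Worked example of the encoding above: a control bit that is set in
 * default1 but not in features can only be 1, so it appears in both the
 * low word (must_be_one) and the high word (can_be_one).  A bit set in
 * features may be 0 or 1 (high word only), and a bit in neither must be
 * 0 (absent from both words).  This matches the allowed-settings format
 * of the IA32_VMX_TRUE_*_CTLS capability MSRs.
 */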
#define VMCS12_MAX_FIELD_INDEX (0x17)

static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
{
    uint64_t kvm_vmx_basic =
        kvm_arch_get_supported_msr_feature(kvm_state,
                                           MSR_IA32_VMX_BASIC);

    if (!kvm_vmx_basic) {
        /*
         * If the kernel doesn't support VMX feature (kvm_intel.nested=0),
         * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
         */
        return;
    }

    uint64_t kvm_vmx_misc =
        kvm_arch_get_supported_msr_feature(kvm_state,
                                           MSR_IA32_VMX_MISC);
    uint64_t kvm_vmx_ept_vpid =
        kvm_arch_get_supported_msr_feature(kvm_state,
                                           MSR_IA32_VMX_EPT_VPID_CAP);

    /*
     * If the guest is 64-bit, a value of 1 is allowed for the host address
     * space size vmexit control.
     */
    uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
        ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;

    /*
     * Bits 0-30, 32-44 and 50-53 come from the host.  KVM should
     * not change them for backwards compatibility.
     */
    uint64_t fixed_vmx_basic = kvm_vmx_basic &
        (MSR_VMX_BASIC_VMCS_REVISION_MASK |
         MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
         MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);

    /*
     * Same for bits 0-4 and 25-27.  Bits 16-24 (CR3 target count) can
     * change in the future but are always zero for now, clear them to be
     * future proof.  Bits 32-63 in theory could change, though KVM does
     * not support dual-monitor treatment and probably never will; mask
     * them out as well.
     */
    uint64_t fixed_vmx_misc = kvm_vmx_misc &
        (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
         MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);

    /*
     * EPT memory types should not change either, so we do not bother
     * adding features for them.
     */
    uint64_t fixed_vmx_ept_mask =
        (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
         MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
    uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;

    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
                                         f[FEAT_VMX_PROCBASED_CTLS]));
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
                                         f[FEAT_VMX_PINBASED_CTLS]));
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
                                         f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
                                         f[FEAT_VMX_ENTRY_CTLS]));
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
                      make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
                                         f[FEAT_VMX_SECONDARY_CTLS]));
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
                      f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
                      f[FEAT_VMX_BASIC] | fixed_vmx_basic);
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
                      f[FEAT_VMX_MISC] | fixed_vmx_misc);
    if (has_msr_vmx_vmfunc) {
        kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
    }

    /*
     * Just to be safe, write these with constant values.  The CRn_FIXED1
     * MSRs are generated by KVM based on the vCPU's CPUID.
     */
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
                      CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
                      CR4_VMXE_MASK);
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM,
                      VMCS12_MAX_FIELD_INDEX << 1);
}
static int kvm_buf_set_msrs(X86CPU *cpu)
{
    int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
                     (uint32_t)e->index, (uint64_t)e->data);
        exit(1);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    return 0;
}
static void kvm_init_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    kvm_msr_buf_reset(cpu);
    if (has_msr_arch_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
                          env->features[FEAT_ARCH_CAPABILITIES]);
    }

    if (has_msr_core_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
                          env->features[FEAT_CORE_CAPABILITY]);
    }

    if (has_msr_ucode_rev) {
        kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
    }

    /*
     * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
     * all kernels with MSR features should have them.
     */
    if (kvm_feature_msrs && cpu_has_vmx(env)) {
        kvm_msr_entry_add_vmx(cpu, env->features);
    }

    assert(kvm_buf_set_msrs(cpu) == 0);
}
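/*
 * kvm_put_msrs() is called with a "level" that says how much state to
 * push: routine writebacks only touch MSRs that are cheap and side-effect
 * free, while levels of KVM_PUT_RESET_STATE and above additionally sync
 * MSRs with guest-visible side effects (TSC, kvmclock, Hyper-V state,
 * MTRRs, ...), as the comments inside note.
 */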
static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    int i;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
    }
    if (has_msr_umwait) {
        kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
    }
    if (has_msr_tsx_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
        kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
    }
#endif

    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback. Limit them to reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
        kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
            kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
            kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
            kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
        }

        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
            kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL,
                              env->poll_control_msr);
        }

        if (has_architectural_pmu_version > 0) {
            if (has_architectural_pmu_version > 1) {
                /* Stop the counter.  */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            }

            /* Set the counter values.  */
            for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            if (has_architectural_pmu_version > 1) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
                                  env->msr_global_status);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                                  env->msr_global_ovf_ctrl);

                /* Now start the PMU.  */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
                                  env->msr_fixed_ctr_ctrl);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
                                  env->msr_global_ctrl);
            }
        }
        /*
         * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
         * only sync them to KVM on the first cpu.
         */
        if (current_cpu == first_cpu) {
            if (has_msr_hv_hypercall) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
                                  env->msr_hv_guest_os_id);
                kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
                                  env->msr_hv_hypercall);
            }
            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
                                  env->msr_hv_tsc);
            }
            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
                                  env->msr_hv_reenlightenment_control);
                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
                                  env->msr_hv_tsc_emulation_control);
                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
                                  env->msr_hv_tsc_emulation_status);
            }
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }
        if (has_msr_hv_crash) {
            int j;

            for (j = 0; j < HV_CRASH_PARAMS; j++)
                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);

            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
        }
        if (has_msr_hv_runtime) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
            && hv_vpindex_settable) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
                              hyperv_vp_index(CPU(cpu)));
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
            int j;

            kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);

            kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
                              env->msr_hv_synic_control);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
                              env->msr_hv_synic_evt_page);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
                              env->msr_hv_synic_msg_page);

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
                                  env->msr_hv_synic_sint[j]);
            }
        }
        if (has_msr_hv_stimer) {
            int j;

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
                                  env->msr_hv_stimer_config[j]);
            }

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
                                  env->msr_hv_stimer_count[j]);
            }
        }
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);

            kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
            kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                /*
                 * The CPU GPs if we write to a bit above the physical limit of
                 * the host CPU (and KVM emulates that).
                 */
                uint64_t mask = env->mtrr_var[i].mask;
                mask &= phys_mask;

                kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
                                  env->mtrr_var[i].base);
                kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
            }
        }
        if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
            int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
                                                        0x14, 1, R_EAX) & 0x7;

            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
                              env->msr_rtit_ctrl);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
                              env->msr_rtit_status);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
                              env->msr_rtit_output_base);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
                              env->msr_rtit_output_mask);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
                              env->msr_rtit_cr3_match);
            for (i = 0; i < addr_num; i++) {
                kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
                                  env->msr_rtit_addrs[i]);
            }
        }

        /*
         * Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         * kvm_put_msr_feature_control().
         */
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    return kvm_buf_set_msrs(cpu);
}
static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}
static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->xsave_buf;
    int ret;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }
    x86_cpu_xrstor_all_areas(cpu, xsave);

    return 0;
}
static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}
static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /*
     * There can only be one pending IRQ set in the bitmap at a time, so try
     * to find it and save its number instead (-1 for none).
     */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
    x86_update_hflags(env);

    return 0;
}
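/*
 * kvm_get_msrs() uses the MSR buffer in the opposite direction: every MSR
 * of interest is appended with a dummy value, a single KVM_GET_MSRS ioctl
 * fills in the data, and the switch at the end scatters the results back
 * into CPUX86State.
 */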
static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i;
    uint64_t mtrr_top_bits;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
    kvm_msr_entry_add(cpu, MSR_PAT, 0);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, 0);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
    }
    if (has_msr_feature_control) {
        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
    }
    if (has_msr_umwait) {
        kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
    }
    if (has_msr_tsx_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
    }
    if (!env->tsc_valid) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
    }
#endif
    kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
        kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
    }
    if (has_architectural_pmu_version > 0) {
        if (has_architectural_pmu_version > 1) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
        }
        for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
        }
        for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
        }
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
        }
    }

    if (has_msr_hv_hypercall) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_CRASH_PARAMS; j++) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
        }
    }
    if (has_msr_hv_runtime) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t msr;

        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (has_msr_hv_stimer) {
        uint32_t msr;

        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
             msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
        int addr_num =
            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;

        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
        for (i = 0; i < addr_num; i++) {
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to get MSR 0x%" PRIx32,
                     (uint32_t)e->index);
        exit(1);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a  10..0: must be zero
     * b  11   : valid
     * c n-1.12: actual mask bits
     * d  51..n: reserved must be zero
     * e  63.52: reserved must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52.  We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading.  It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to.
     */
    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_MCG_EXT_CTL:
            env->mcg_ext_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_SMI_COUNT:
            env->msr_smi_count = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        case MSR_IA32_UMWAIT_CONTROL:
            env->umwait = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_KVM_POLL_CONTROL: {
            env->poll_control_msr = msrs[i].data;
            break;
        }
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ...
             MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] =
                msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] =
                msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                msrs[i].data;
            break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
            env->msr_hv_reenlightenment_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
            env->msr_hv_tsc_emulation_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
            env->msr_hv_tsc_emulation_status = msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        case MSR_IA32_SPEC_CTRL:
            env->spec_ctrl = msrs[i].data;
            break;
        case MSR_IA32_TSX_CTRL:
            env->tsx_ctrl = msrs[i].data;
            break;
        case MSR_VIRT_SSBD:
            env->virt_ssbd = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CTL:
            env->msr_rtit_ctrl = msrs[i].data;
            break;
        case MSR_IA32_RTIT_STATUS:
            env->msr_rtit_status = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_BASE:
            env->msr_rtit_output_base = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_MASK:
            env->msr_rtit_output_mask = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CR3_MATCH:
            env->msr_rtit_cr3_match = msrs[i].data;
            break;
        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
            env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
            break;
        }
    }

    return 0;
}
static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}
static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}
static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    if (has_exception_payload) {
        events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        events.exception.pending = env->exception_pending;
        events.exception_has_payload = env->exception_has_payload;
        events.exception_payload = env->exception_payload;
    }
    events.exception.nr = env->exception_nr;
    events.exception.injected = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /*
             * As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request.  */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        /*
         * Stop SMI delivery on old machine types to avoid a reboot
         * on an inward migration of an old VM.
         */
        if (!cpu->kvm_no_smi_migration) {
            events.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
            events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
        env->exception_pending = events.exception.pending;
        env->exception_has_payload = events.exception_has_payload;
        env->exception_payload = events.exception_payload;
    } else {
        env->exception_pending = 0;
        env->exception_has_payload = false;
    }
    env->exception_injected = events.exception.injected;
    env->exception_nr =
        (env->exception_pending || env->exception_injected) ?
        events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}
static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_nr == EXCP01_DB) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == EXCP03_INT3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        kvm_reset_exception(env);
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}
static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    memset(&dbgregs, 0, sizeof(dbgregs));
    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}
static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}
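/*
 * KVM_{GET,SET}_NESTED_STATE transfer the opaque nested-virtualization
 * state kept by the kernel (for VMX this includes the cached VMCS12), so
 * that a guest which is itself running a hypervisor can be migrated.
 */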
static int kvm_put_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();

    if (!env->nested_state) {
        return 0;
    }

    assert(env->nested_state->size <= max_nested_state_len);
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
}
static int kvm_get_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();
    int ret;

    if (!env->nested_state) {
        return 0;
    }

    /*
     * It is possible that migration restored a smaller size into
     * nested_state->hdr.size than what our kernel supports.  We preserve
     * the migration origin nested_state->hdr.size for the call to
     * KVM_SET_NESTED_STATE, but wish that our next call to
     * KVM_GET_NESTED_STATE will use the max size our kernel supports.
     */
    env->nested_state->size = max_nested_state_len;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
    if (ret < 0) {
        return ret;
    }

    if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
        env->hflags |= HF_GUEST_MASK;
    } else {
        env->hflags &= ~HF_GUEST_MASK;
    }

    return ret;
}
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_nested_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }

        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /*
         * We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_nested_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI or SMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /*
     * Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /*
         * If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts.
         */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /*
     * We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used.
     */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}
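/*
 * kvm_arch_process_async_events() runs outside the vcpu execution loop
 * and handles the interrupt requests that cannot be injected while the
 * vcpu is inside KVM: machine checks, INIT, SIPI, and (for the userspace
 * APIC) poll/TPR requests.  A non-zero return keeps the vcpu halted
 * instead of re-entering the kernel.
 */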
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_nr == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}
static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}
static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}
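/*
 * Software breakpoints are implemented by patching the guest: the original
 * byte at the breakpoint address is saved in bp->saved_insn and replaced
 * with an int3 opcode (0xcc); removal verifies the 0xcc is still in place
 * before restoring the saved byte.
 */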
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;
static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == EXCP01_DB) {
        if (arch_info->dr6 & DR6_BS) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_nr == -1);

        /* pass to guest */
        kvm_queue_exception(env, arch_info->exception,
                            arch_info->exception == EXCP01_DB,
                            arch_info->dr6);
        env->has_error_code = 0;
    }

    return ret;
}
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure can be most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}
void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
           MSI routes for signaling interrupts to the local apics. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}
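/*
 * Enable KVM_CAP_SPLIT_IRQCHIP when "split" irqchip mode is requested;
 * the final argument (24) tells the kernel how many IOAPIC pins to
 * route, matching IOAPIC_NUM_PINS.
 */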
int kvm_arch_irqchip_create(KVMState *s)
{
    int ret;

    if (kvm_kernel_irqchip_split()) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}
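/*
 * When a vIOMMU with interrupt remapping is present, translate the MSI
 * address/data pair through its int_remap hook before the route is
 * handed to the kernel; without an IOMMU the route is left untouched.
 */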
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        int ret;
        MSIMessage src, dst;
        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);

        if (!class->int_remap) {
            return 0;
        }

        src.address = route->u.msi.address_hi;
        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
        src.address |= route->u.msi.address_lo;
        src.data = route->u.msi.data;

        ret = class->int_remap(iommu, &src, &dst,
                               dev ? pci_requester_id(dev)
                                   : X86_IOMMU_SID_INVALID);
        if (ret) {
            trace_kvm_x86_fixup_msi_error(route->gsi);
            return 1;
        }

        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
        route->u.msi.data = dst.data;
    }

    return 0;
}
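/*
 * Bookkeeping of MSI routes per PCI device: when the vIOMMU invalidates
 * its interrupt entry cache, every kernel route recorded below has to
 * be recomputed from the device's current MSI/MSI-X message.
 */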
typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
    QLIST_HEAD_INITIALIZER(msi_route_list);
static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0, vector;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        vector = entry->vector;
        dev = entry->dev;
        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
            msg = msix_get_message(dev, vector);
        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
            msg = msi_get_message(dev, vector);
        } else {
            /*
             * Either MSI/MSIX is disabled for the device, or the
             * specific message was masked out.  Skip this one.
             */
            continue;
        }
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}
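/*
 * Record every PCI MSI route added to the kernel so that
 * kvm_update_msi_routes_all() can refresh it later; on first use,
 * register the IEC notifier with the vIOMMU if one is present.
 */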
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only. */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* For the first time we do add route, add ourselves into
         * IOMMU's IEC notify list if needed. */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }

    return 0;
}
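/* Drop the bookkeeping entry for a virq when its route is released. */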
int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;

    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}
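/*
 * On x86 the MSI data payload never encodes a GSI directly, so this
 * hook is never expected to be called; abort() if it is.
 */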
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}