/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/syscall.h>

#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"
#include "hw/xen/interface/arch-x86/cpuid.h"

#include "cpu.h"
#include "host-cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#include "sev.h"
#include "xen-emu.h"
#include "hyperv.h"
#include "hyperv-proto.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/ratelimit.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"
#include "hw/i386/x86.h"
#include "hw/i386/kvm/xen_evtchn.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/e820_memory_layout.h"

#include "hw/xen/xen.h"

#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"

#include CONFIG_DEVICES

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS       1
#define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096

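/*
 * Illustrative arithmetic (assuming the uapi layouts: an 8-byte kvm_msrs
 * header and 16-byte kvm_msr_entry records): 8 + 255 * 16 = 4088 bytes,
 * so 255 entries fit in the 4096-byte buffer with 8 bytes to spare.
 */
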
static void kvm_init_msrs(X86CPU *cpu);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_hv_syndbg_options;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_tsc_scale_msr;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave;
static int has_xsave2;
static int has_xcrs;
static int has_pit_state2;
static int has_sregs2;
static int has_exception_payload;
static int has_triple_fault_event;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);

bool kvm_has_pit_state2(void)
{
    return !!has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret & KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_adjust_clock(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
             kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
             has_x2apic_api);
}

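/*
 * Note: because of MEMORIZE(), only the first call to kvm_enable_x2apic()
 * actually issues the KVM_CAP_X2APIC_API ioctl; subsequent calls return the
 * result cached in has_x2apic_api.
 */
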
bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t value;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    env->tsc_valid = !runstate_is_running();

    ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value);
    if (ret < 0) {
        return ret;
    }

    env->tsc = value;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            cpuid = NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}

static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

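/*
 * For reference (assumed model decoding, not spelled out in this file):
 * family 6 models 60/63/69/70 are Haswell desktop/server/ULT/GT3e parts,
 * whose early steppings shipped with HLE/RTM that was later disabled by
 * microcode updates.
 */
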
/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx, unused;
    uint64_t bitmask;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
        /* KVM never reports CPUID_HT but QEMU can support when vcpus > 1 */
        ret |= CPUID_HT;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
                kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        /* Not new instructions, just an optimization.  */
        uint32_t ebx;
        host_cpuid(7, 0, &unused, &ebx, &unused, &unused);
        ret |= ebx & CPUID_7_0_EBX_ERMS;

        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /* Not new instructions, just an optimization.  */
        uint32_t edx;
        host_cpuid(7, 0, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_0_EDX_FSRM;

        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 7 && index == 1 && reg == R_EAX) {
        /* Not new instructions, just an optimization.  */
        uint32_t eax;
        host_cpuid(7, 1, &eax, &unused, &unused, &unused);
        ret |= eax & (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC);
    } else if (function == 7 && index == 2 && reg == R_EDX) {
        uint32_t edx;
        host_cpuid(7, 2, &unused, &unused, &unused, &edx);
        ret |= edx & CPUID_7_2_EDX_MCDT_NO;
    } else if (function == 0xd && index == 0 &&
               (reg == R_EAX || reg == R_EDX)) {
        /*
         * The value returned by KVM_GET_SUPPORTED_CPUID does not include
         * features that still have to be enabled with the arch_prctl
         * system call.  QEMU needs the full value, which is retrieved
         * with KVM_GET_DEVICE_ATTR.
         */
        struct kvm_device_attr attr = {
            .group = 0,
            .attr = KVM_X86_XCOMP_GUEST_SUPP,
            .addr = (unsigned long) &bitmask
        };

        bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
        if (!sys_attr) {
            return ret;
        }

        int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
        if (rc < 0) {
            if (rc != -ENXIO) {
                warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
                            "error: %d", rc);
            }
            return ret;
        }
        ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
        if (kvm_irqchip_is_split()) {
            ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    return ret;
}

uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;
    int i;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++)
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
            index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}

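/*
 * Worked example for the CTLS transformation above (illustrative values):
 * with value = 0x0000001600000016ULL both halves are 0x16, so
 * can_be_one & ~must_be_one == 0 and every settable bit is mandatory.
 * With value = 0x000000ff00000016ULL the bits set only in the high half
 * (0xff & ~0x16 = 0xe9) survive the mask and are reported as optional.
 */
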
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_RIPV | MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running.  An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                    addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                "for memory used by QEMU itself instead of guest system!",
                 addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE, just notify an event */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}

static void kvm_reset_exception(CPUX86State *env)
{
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
}

static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}

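/*
 * Design note: with KVM_CAP_EXCEPTION_PAYLOAD the exception is queued as
 * "pending" and the payload (DR6 for #DB, CR2 for #PF) travels to the kernel
 * together with the event; without it, QEMU emulates delivery itself by
 * writing the payload into env->dr[6]/env->cr[2] and marking the exception
 * "injected", as done above.
 */
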
static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        kvm_reset_exception(env);

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}

/*
 * Check whether target_freq is within conservative
 * ntp correctable bounds (250ppm) of freq
 */
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}

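/*
 * Worked example: for a host TSC of freq = 2400000 kHz, 250 ppm is
 * 2400000 * 250 / 1000000 = 600 kHz, so any target frequency between
 * 2399400 and 2400600 kHz counts as NTP-correctable.
 */
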
static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If desired TSC frequency is within bounds of NTP correction,
     * attempt to set TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}

static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)

static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
    [HYPERV_FEAT_AVIC] = {
        .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_DEPRECATING_AEOI_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_SYNDBG] = {
        .desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED)
    },
    [HYPERV_FEAT_MSR_BITMAP] = {
        .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_MSR_BITMAP}
        }
    },
    [HYPERV_FEAT_XMM_INPUT] = {
        .desc = "XMM fast hypercall input (hv-xmm-input)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_HYPERCALL_XMM_INPUT_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TLBFLUSH_EXT] = {
        .desc = "Extended gva ranges for TLB flush hypercalls (hv-tlbflush-ext)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_EXT_GVA_RANGES_FLUSH_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_TLBFLUSH)
    },
    [HYPERV_FEAT_TLBFLUSH_DIRECT] = {
        .desc = "direct TLB flush (hv-tlbflush-direct)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_DIRECT_FLUSH}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
};

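/*
 * Note on the table above: hyperv_feature_supported() requires *all* .bits
 * of every non-empty flags[] entry to be present in the host's Hyper-V
 * CPUID, and hv_feature_check_deps() treats .dependencies as a bitmap of
 * HYPERV_FEAT_* indices (e.g. hv-stimer pulls in both hv-synic and hv-time).
 */
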
static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
                                           bool do_sys_ioctl)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    if (do_sys_ioctl) {
        r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    }
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }

    return cpuid;
}

/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
    int max = 11;
    int i;
    bool do_sys_ioctl =
        kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;

    /*
     * Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
     * unsupported, kvm_hyperv_expand_features() checks for that.
     */
    assert(do_sys_ioctl || cs->kvm_state);

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
        max++;
    }

    /*
     * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before
     * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the
     * information early, just check for the capability and set the bit
     * manually.
     */
    if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        for (i = 0; i < cpuid->nent; i++) {
            if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
                cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
            }
        }
    }

    return cpuid;
}

/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
 * leaves from KVM_CAP_HYPERV* and present MSRs data.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (has_msr_hv_syndbg_options) {
        entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE;
        entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
        entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}

static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
{
    struct kvm_cpuid_entry2 *entry;
    struct kvm_cpuid2 *cpuid;

    if (hv_cpuid_cache) {
        cpuid = hv_cpuid_cache;
    } else {
        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
            cpuid = get_supported_hv_cpuid(cs);
        } else {
            /*
             * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
             * before KVM context is created but this is only done when
             * KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
             * KVM_CAP_HYPERV_CPUID.
             */
            assert(cs->kvm_state);

            cpuid = get_supported_hv_cpuid_legacy(cs);
        }
        hv_cpuid_cache = cpuid;
    }

    if (!cpuid) {
        return 0;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return 0;
    }

    return cpuid_entry_get_reg(entry, reg);
}

static bool hyperv_feature_supported(CPUState *cs, int feature)
{
    uint32_t func, bits;
    int i, reg;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {

        func = kvm_hyperv_properties[feature].flags[i].func;
        reg = kvm_hyperv_properties[feature].flags[i].reg;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!func) {
            continue;
        }

        if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
            return false;
        }
    }

    return true;
}

/* Checks that all feature dependencies are enabled */
static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
{
    uint64_t deps;
    int dep_feat;

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                       kvm_hyperv_properties[feature].desc,
                       kvm_hyperv_properties[dep_feat].desc);
            return false;
        }
        deps &= ~(1ull << dep_feat);
    }

    return true;
}

static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
{
    X86CPU *cpu = X86_CPU(cs);
    uint32_t r = 0;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
        if (!hyperv_feat_enabled(cpu, i)) {
            continue;
        }

        for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
            if (kvm_hyperv_properties[i].flags[j].func != func) {
                continue;
            }
            if (kvm_hyperv_properties[i].flags[j].reg != reg) {
                continue;
            }

            r |= kvm_hyperv_properties[i].flags[j].bits;
        }
    }

    /* HV_CPUID_NESTED_FEATURES.EAX also encodes the supported eVMCS range */
    if (func == HV_CPUID_NESTED_FEATURES && reg == R_EAX) {
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
            r |= DEFAULT_EVMCS_VERSION;
        }
    }

    return r;
}

/*
 * Expand Hyper-V CPU features. In particular, check that all the requested
 * features are supported by the host and the sanity of the configuration
 * (that all the required dependencies are included). Also, this takes care
 * of 'hv_passthrough' mode and fills the environment with all supported
 * Hyper-V features.
 */
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int feat;

    if (!hyperv_enabled(cpu))
        return true;

    /*
     * When kvm_hyperv_expand_features is called at CPU feature expansion
     * time per-CPU kvm_state is not available yet so we can only proceed
     * when KVM_CAP_SYS_HYPERV_CPUID is supported.
     */
    if (!cs->kvm_state &&
        !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID))
        return true;

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_vendor_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
        cpu->hyperv_vendor_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
        cpu->hyperv_vendor_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
                                       sizeof(cpu->hyperv_vendor_id) + 1);
        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
               sizeof(cpu->hyperv_vendor_id));
        cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;

        cpu->hyperv_interface_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
        cpu->hyperv_interface_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
        cpu->hyperv_interface_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
        cpu->hyperv_interface_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);

        cpu->hyperv_ver_id_build =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
        cpu->hyperv_ver_id_major =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
        cpu->hyperv_ver_id_minor =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
        cpu->hyperv_ver_id_sp =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
        cpu->hyperv_ver_id_sb =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
        cpu->hyperv_ver_id_sn =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;

        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
                                            R_EAX);
        cpu->hyperv_limits[0] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
        cpu->hyperv_limits[1] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
        cpu->hyperv_limits[2] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);

        cpu->hyperv_spinlock_attempts =
            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);

        /*
         * Mark feature as enabled in 'cpu->hyperv_features' as
         * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
         */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            if (hyperv_feature_supported(cs, feat)) {
                cpu->hyperv_features |= BIT(feat);
            }
        }
    }

    /* Check features availability and dependencies */
    for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
        /* If the feature was not requested skip it. */
        if (!hyperv_feat_enabled(cpu, feat)) {
            continue;
        }

        /* Check if the feature is supported by KVM */
        if (!hyperv_feature_supported(cs, feat)) {
            error_setg(errp, "Hyper-V %s is not supported by kernel",
                       kvm_hyperv_properties[feat].desc);
            return false;
        }

        /* Check dependencies */
        if (!hv_feature_check_deps(cpu, feat, &local_err)) {
            error_propagate(errp, local_err);
            return false;
        }
    }

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                   kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                   kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        return false;
    }

    return true;
}

/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
 */
static int hyperv_fill_cpuids(CPUState *cs,
                              struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint32_t cpuid_i = 0, max_cpuid_leaf = 0;
    uint32_t nested_eax =
        hv_build_cpuid_leaf(cs, HV_CPUID_NESTED_FEATURES, R_EAX);

    max_cpuid_leaf = nested_eax ? HV_CPUID_NESTED_FEATURES :
        HV_CPUID_IMPLEMENT_LIMITS;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        max_cpuid_leaf =
            MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    c->eax = max_cpuid_leaf;
    c->ebx = cpu->hyperv_vendor_id[0];
    c->ecx = cpu->hyperv_vendor_id[1];
    c->edx = cpu->hyperv_vendor_id[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    c->eax = cpu->hyperv_interface_id[0];
    c->ebx = cpu->hyperv_interface_id[1];
    c->ecx = cpu->hyperv_interface_id[2];
    c->edx = cpu->hyperv_interface_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = cpu->hyperv_ver_id_build;
    c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
        cpu->hyperv_ver_id_minor;
    c->ecx = cpu->hyperv_ver_id_sp;
    c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
        (cpu->hyperv_ver_id_sn & 0xffffff);

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
    c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
    c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);

    /* Unconditionally required with any Hyper-V enlightenment */
    c->eax |= HV_HYPERCALL_AVAILABLE;

    /* SynIC and Vmbus devices require messages/signals hypercalls */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only) {
        c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
    c->ebx = cpu->hyperv_spinlock_attempts;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
        c->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        c->eax |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
            HV_NO_NONARCH_CORESHARING;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = cpu->hyperv_limits[0];
    c->ecx = cpu->hyperv_limits[1];
    c->edx = cpu->hyperv_limits[2];

    if (nested_eax) {
        uint32_t function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = nested_eax;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS;
        c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
            HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
        memcpy(signature, "Microsoft VS", 12);
        c->eax = 0;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_INTERFACE;
        memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
        c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;
    }

    return cpuid_i;
}

static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;

/* Checks that the exposed eVMCS version range is supported by KVM */
static bool evmcs_version_supported(uint16_t evmcs_version,
                                    uint16_t supported_evmcs_version)
{
    uint8_t min_version = evmcs_version & 0xff;
    uint8_t max_version = evmcs_version >> 8;
    uint8_t min_supported_version = supported_evmcs_version & 0xff;
    uint8_t max_supported_version = supported_evmcs_version >> 8;

    return (min_version >= min_supported_version) &&
           (max_version <= max_supported_version);
}

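/*
 * Example of the encoding (low byte = minimum, high byte = maximum
 * supported eVMCS version): DEFAULT_EVMCS_VERSION is ((1 << 8) | 1),
 * i.e. the range [1..1], which is accepted whenever the kernel reports
 * min <= 1 and max >= 1.
 */
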
static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(&hv_passthrough_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(&hv_no_nonarch_cs_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        uint64_t value;

        ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value);
        if (ret < 0) {
            return ret;
        }

        if (value != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
        uint16_t supported_evmcs_version;

        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                  (uintptr_t)&supported_evmcs_version);

        /*
         * KVM is required to support EVMCS ver.1, as that's what 'hv-evmcs'
         * option sets. Note: we hardcode the maximum supported eVMCS version
         * to '1' as well so 'hv-evmcs' feature is migratable even when (and if)
         * ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will then have
         * to be added.
         */
        if (ret < 0) {
            error_report("Hyper-V %s is not supported by kernel",
                         kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return ret;
        }

        if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
            error_report("eVMCS version range [%d..%d] is not supported by "
                         "kernel (supported: [%d..%d])", evmcs_version & 0xff,
                         evmcs_version >> 8, supported_evmcs_version & 0xff,
                         supported_evmcs_version >> 8);
            return -ENOTSUP;
        }
    }

    if (cpu->hyperv_enforce_cpuid) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
        if (ret < 0) {
            error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
                         strerror(-ret));
            return ret;
        }
    }

    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100

static void kvm_init_xsave(CPUX86State *env)
{
    if (has_xsave2) {
        env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
    } else if (has_xsave) {
        env->xsave_buf_len = sizeof(struct kvm_xsave);
    } else {
        return;
    }

    env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
    memset(env->xsave_buf, 0, env->xsave_buf_len);
    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
           env->xsave_buf_len);
}

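/*
 * Note: for KVM_CAP_XSAVE2 the value returned by the extension check (kept
 * in has_xsave2) is the buffer size the kernel needs for KVM_GET_XSAVE2,
 * which is why it is rounded up to a page above instead of being treated
 * as a plain boolean.
 */
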
static void kvm_init_nested_state(CPUX86State *env)
{
    struct kvm_vmx_nested_state_hdr *vmx_hdr;
    uint32_t size;

    if (!env->nested_state) {
        return;
    }

    size = env->nested_state->size;

    memset(env->nested_state, 0, size);
    env->nested_state->size = size;

    if (cpu_has_vmx(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
        vmx_hdr = &env->nested_state->hdr.vmx;
        vmx_hdr->vmxon_pa = -1ull;
        vmx_hdr->vmcs12_pa = -1ull;
    } else if (cpu_has_svm(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
    }
}

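/*
 * Note (per the KVM nested-state ABI): vmxon_pa/vmcs12_pa of -1ull tell the
 * kernel that the vCPU is not in VMX operation and has no current VMCS,
 * i.e. a clean post-reset nested state.
 */
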
1754 int kvm_arch_init_vcpu(CPUState
*cs
)
1757 struct kvm_cpuid2 cpuid
;
1758 struct kvm_cpuid_entry2 entries
[KVM_MAX_CPUID_ENTRIES
];
1761 * The kernel defines these structs with padding fields so there
1762 * should be no extra padding in our cpuid_data struct.
1764 QEMU_BUILD_BUG_ON(sizeof(cpuid_data
) !=
1765 sizeof(struct kvm_cpuid2
) +
1766 sizeof(struct kvm_cpuid_entry2
) * KVM_MAX_CPUID_ENTRIES
);
1768 X86CPU
*cpu
= X86_CPU(cs
);
1769 CPUX86State
*env
= &cpu
->env
;
1770 uint32_t limit
, i
, j
, cpuid_i
;
1772 struct kvm_cpuid_entry2
*c
;
1773 uint32_t signature
[3];
1774 int kvm_base
= KVM_CPUID_SIGNATURE
;
1775 int max_nested_state_len
;
1777 Error
*local_err
= NULL
;
1779 memset(&cpuid_data
, 0, sizeof(cpuid_data
));
1783 has_xsave2
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_XSAVE2
);
1785 r
= kvm_arch_set_tsc_khz(cs
);
1790 /* vcpu's TSC frequency is either specified by user, or following
1791 * the value used by KVM if the former is not present. In the
1792 * latter case, we query it from KVM and record in env->tsc_khz,
1793 * so that vcpu's TSC frequency can be migrated later via this field.
1795 if (!env
->tsc_khz
) {
1796 r
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_GET_TSC_KHZ
) ?
1797 kvm_vcpu_ioctl(cs
, KVM_GET_TSC_KHZ
) :
1804 env
->apic_bus_freq
= KVM_APIC_BUS_FREQUENCY
;
1807 * kvm_hyperv_expand_features() is called here for the second time in case
1808 * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle
1809 * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to
1810 * check which Hyper-V enlightenments are supported and which are not, we
1811 * can still proceed and check/expand Hyper-V enlightenments here so legacy
1812 * behavior is preserved.
1814 if (!kvm_hyperv_expand_features(cpu
, &local_err
)) {
1815 error_report_err(local_err
);
1819 if (hyperv_enabled(cpu
)) {
1820 r
= hyperv_init_vcpu(cpu
);
1825 cpuid_i
= hyperv_fill_cpuids(cs
, cpuid_data
.entries
);
1826 kvm_base
= KVM_CPUID_SIGNATURE_NEXT
;
1827 has_msr_hv_hypercall
= true;
1830 if (cs
->kvm_state
->xen_version
) {
1831 #ifdef CONFIG_XEN_EMU
1832 struct kvm_cpuid_entry2
*xen_max_leaf
;
1834 memcpy(signature
, "XenVMMXenVMM", 12);
1836 xen_max_leaf
= c
= &cpuid_data
.entries
[cpuid_i
++];
1837 c
->function
= kvm_base
+ XEN_CPUID_SIGNATURE
;
1838 c
->eax
= kvm_base
+ XEN_CPUID_TIME
;
1839 c
->ebx
= signature
[0];
1840 c
->ecx
= signature
[1];
1841 c
->edx
= signature
[2];
1843 c
= &cpuid_data
.entries
[cpuid_i
++];
1844 c
->function
= kvm_base
+ XEN_CPUID_VENDOR
;
1845 c
->eax
= cs
->kvm_state
->xen_version
;
1850 c
= &cpuid_data
.entries
[cpuid_i
++];
1851 c
->function
= kvm_base
+ XEN_CPUID_HVM_MSR
;
1852 /* Number of hypercall-transfer pages */
1854 /* Hypercall MSR base address */
1855 if (hyperv_enabled(cpu
)) {
1856 c
->ebx
= XEN_HYPERCALL_MSR_HYPERV
;
1857 kvm_xen_init(cs
->kvm_state
, c
->ebx
);
1859 c
->ebx
= XEN_HYPERCALL_MSR
;
1864 c
= &cpuid_data
.entries
[cpuid_i
++];
1865 c
->function
= kvm_base
+ XEN_CPUID_TIME
;
1866 c
->eax
= ((!!tsc_is_stable_and_known(env
) << 1) |
1867 (!!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_RDTSCP
) << 2));
1868 /* default=0 (emulate if necessary) */
1870 /* guest tsc frequency */
1871 c
->ecx
= env
->user_tsc_khz
;
1872 /* guest tsc incarnation (migration count) */
1875 c
= &cpuid_data
.entries
[cpuid_i
++];
1876 c
->function
= kvm_base
+ XEN_CPUID_HVM
;
1877 xen_max_leaf
->eax
= kvm_base
+ XEN_CPUID_HVM
;
1878 if (cs
->kvm_state
->xen_version
>= XEN_VERSION(4, 5)) {
1879 c
->function
= kvm_base
+ XEN_CPUID_HVM
;
1881 if (cpu
->xen_vapic
) {
1882 c
->eax
|= XEN_HVM_CPUID_APIC_ACCESS_VIRT
;
1883 c
->eax
|= XEN_HVM_CPUID_X2APIC_VIRT
;
1886 c
->eax
|= XEN_HVM_CPUID_IOMMU_MAPPINGS
;
1888 if (cs
->kvm_state
->xen_version
>= XEN_VERSION(4, 6)) {
1889 c
->eax
|= XEN_HVM_CPUID_VCPU_ID_PRESENT
;
1890 c
->ebx
= cs
->cpu_index
;
1894 r
= kvm_xen_init_vcpu(cs
);
1900 #else /* CONFIG_XEN_EMU */
1901 /* This should never happen as kvm_arch_init() would have died first. */
1902 fprintf(stderr
, "Cannot enable Xen CPUID without Xen support\n");
1905 } else if (cpu
->expose_kvm
) {
1906 memcpy(signature
, "KVMKVMKVM\0\0\0", 12);
1907 c
= &cpuid_data
.entries
[cpuid_i
++];
1908 c
->function
= KVM_CPUID_SIGNATURE
| kvm_base
;
1909 c
->eax
= KVM_CPUID_FEATURES
| kvm_base
;
1910 c
->ebx
= signature
[0];
1911 c
->ecx
= signature
[1];
1912 c
->edx
= signature
[2];
1914 c
= &cpuid_data
.entries
[cpuid_i
++];
1915 c
->function
= KVM_CPUID_FEATURES
| kvm_base
;
1916 c
->eax
= env
->features
[FEAT_KVM
];
1917 c
->edx
= env
->features
[FEAT_KVM_HINTS
];
1920 cpu_x86_cpuid(env
, 0, 0, &limit
, &unused
, &unused
, &unused
);
1922 if (cpu
->kvm_pv_enforce_cpuid
) {
1923 r
= kvm_vcpu_enable_cap(cs
, KVM_CAP_ENFORCE_PV_FEATURE_CPUID
, 0, 1);
1926 "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s",
1932 for (i
= 0; i
<= limit
; i
++) {
1933 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1934 fprintf(stderr
, "unsupported level value: 0x%x\n", limit
);
1937 c
= &cpuid_data
.entries
[cpuid_i
++];
1941 /* Keep reading function 2 till all the input is received */
1945 c
->flags
= KVM_CPUID_FLAG_STATEFUL_FUNC
|
1946 KVM_CPUID_FLAG_STATE_READ_NEXT
;
1947 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1948 times
= c
->eax
& 0xff;
1950 for (j
= 1; j
< times
; ++j
) {
1951 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1952 fprintf(stderr
, "cpuid_data is full, no space for "
1953 "cpuid(eax:2):eax & 0xf = 0x%x\n", times
);
1956 c
= &cpuid_data
.entries
[cpuid_i
++];
1958 c
->flags
= KVM_CPUID_FLAG_STATEFUL_FUNC
;
1959 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1964 if (env
->nr_dies
< 2) {
1971 for (j
= 0; ; j
++) {
1972 if (i
== 0xd && j
== 64) {
1976 if (i
== 0x1f && j
== 64) {
1981 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1983 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
1985 if (i
== 4 && c
->eax
== 0) {
1988 if (i
== 0xb && !(c
->ecx
& 0xff00)) {
1991 if (i
== 0x1f && !(c
->ecx
& 0xff00)) {
1994 if (i
== 0xd && c
->eax
== 0) {
1997 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
1998 fprintf(stderr
, "cpuid_data is full, no space for "
1999 "cpuid(eax:0x%x,ecx:0x%x)\n", i
, j
);
2002 c
= &cpuid_data
.entries
[cpuid_i
++];
2007 for (j
= 0; ; j
++) {
2009 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
2011 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
2013 if (j
> 1 && (c
->eax
& 0xf) != 1) {
2017 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
2018 fprintf(stderr
, "cpuid_data is full, no space for "
2019 "cpuid(eax:0x12,ecx:0x%x)\n", j
);
2022 c
= &cpuid_data
.entries
[cpuid_i
++];
2032 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
2033 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
2036 for (j
= 1; j
<= times
; ++j
) {
2037 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
2038 fprintf(stderr
, "cpuid_data is full, no space for "
2039 "cpuid(eax:0x%x,ecx:0x%x)\n", i
, j
);
2042 c
= &cpuid_data
.entries
[cpuid_i
++];
2045 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
2046 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
2053 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
2054 if (!c
->eax
&& !c
->ebx
&& !c
->ecx
&& !c
->edx
) {
2056 * KVM already returns all zeroes if a CPUID entry is missing,
2057 * so we can omit it and avoid hitting KVM's 80-entry limit.
2065 if (limit
>= 0x0a) {
2068 cpu_x86_cpuid(env
, 0x0a, 0, &eax
, &unused
, &unused
, &edx
);
2070 has_architectural_pmu_version
= eax
& 0xff;
2071 if (has_architectural_pmu_version
> 0) {
2072 num_architectural_pmu_gp_counters
= (eax
& 0xff00) >> 8;
2074 /* Shouldn't be more than 32, since that's the number of bits
2075 * available in EBX to tell us _which_ counters are available.
2078 if (num_architectural_pmu_gp_counters
> MAX_GP_COUNTERS
) {
2079 num_architectural_pmu_gp_counters
= MAX_GP_COUNTERS
;
2082 if (has_architectural_pmu_version
> 1) {
2083 num_architectural_pmu_fixed_counters
= edx
& 0x1f;
2085 if (num_architectural_pmu_fixed_counters
> MAX_FIXED_COUNTERS
) {
2086 num_architectural_pmu_fixed_counters
= MAX_FIXED_COUNTERS
;
2092 cpu_x86_cpuid(env
, 0x80000000, 0, &limit
, &unused
, &unused
, &unused
);
2094 for (i
= 0x80000000; i
<= limit
; i
++) {
2095 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
2096 fprintf(stderr
, "unsupported xlevel value: 0x%x\n", limit
);
2099 c
= &cpuid_data
.entries
[cpuid_i
++];
2103 /* Query for all AMD cache information leaves */
2104 for (j
= 0; ; j
++) {
2106 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
2108 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
2113 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
2114 fprintf(stderr
, "cpuid_data is full, no space for "
2115 "cpuid(eax:0x%x,ecx:0x%x)\n", i
, j
);
2118 c
= &cpuid_data
.entries
[cpuid_i
++];
2124 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
2125 if (!c
->eax
&& !c
->ebx
&& !c
->ecx
&& !c
->edx
) {
2127 * KVM already returns all zeroes if a CPUID entry is missing,
2128 * so we can omit it and avoid hitting KVM's 80-entry limit.
2136 /* Call Centaur's CPUID instructions they are supported. */
2137 if (env
->cpuid_xlevel2
> 0) {
2138 cpu_x86_cpuid(env
, 0xC0000000, 0, &limit
, &unused
, &unused
, &unused
);
2140 for (i
= 0xC0000000; i
<= limit
; i
++) {
2141 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
2142 fprintf(stderr
, "unsupported xlevel2 value: 0x%x\n", limit
);
2145 c
= &cpuid_data
.entries
[cpuid_i
++];
2149 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
2153 cpuid_data
.cpuid
.nent
= cpuid_i
;
2155 if (((env
->cpuid_version
>> 8)&0xF) >= 6
2156 && (env
->features
[FEAT_1_EDX
] & (CPUID_MCE
| CPUID_MCA
)) ==
2157 (CPUID_MCE
| CPUID_MCA
)
2158 && kvm_check_extension(cs
->kvm_state
, KVM_CAP_MCE
) > 0) {
2159 uint64_t mcg_cap
, unsupported_caps
;
2163 ret
= kvm_get_mce_cap_supported(cs
->kvm_state
, &mcg_cap
, &banks
);
2165 fprintf(stderr
, "kvm_get_mce_cap_supported: %s", strerror(-ret
));
2169 if (banks
< (env
->mcg_cap
& MCG_CAP_BANKS_MASK
)) {
2170 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
2171 (int)(env
->mcg_cap
& MCG_CAP_BANKS_MASK
), banks
);
2175 unsupported_caps
= env
->mcg_cap
& ~(mcg_cap
| MCG_CAP_BANKS_MASK
);
2176 if (unsupported_caps
) {
2177 if (unsupported_caps
& MCG_LMCE_P
) {
2178 error_report("kvm: LMCE not supported");
2181 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64
,
2185 env
->mcg_cap
&= mcg_cap
| MCG_CAP_BANKS_MASK
;
2186 ret
= kvm_vcpu_ioctl(cs
, KVM_X86_SETUP_MCE
, &env
->mcg_cap
);
2188 fprintf(stderr
, "KVM_X86_SETUP_MCE: %s", strerror(-ret
));
2193 cpu
->vmsentry
= qemu_add_vm_change_state_handler(cpu_update_state
, env
);
2195 c
= cpuid_find_entry(&cpuid_data
.cpuid
, 1, 0);
2197 has_msr_feature_control
= !!(c
->ecx
& CPUID_EXT_VMX
) ||
2198 !!(c
->ecx
& CPUID_EXT_SMX
);
2201 c
= cpuid_find_entry(&cpuid_data
.cpuid
, 7, 0);
2202 if (c
&& (c
->ebx
& CPUID_7_0_EBX_SGX
)) {
2203 has_msr_feature_control
= true;
2206 if (env
->mcg_cap
& MCG_LMCE_P
) {
2207 has_msr_mcg_ext_ctl
= has_msr_feature_control
= true;
2210 if (!env
->user_tsc_khz
) {
2211 if ((env
->features
[FEAT_8000_0007_EDX
] & CPUID_APM_INVTSC
) &&
2212 invtsc_mig_blocker
== NULL
) {
2213 error_setg(&invtsc_mig_blocker
,
2214 "State blocked by non-migratable CPU device"
2216 r
= migrate_add_blocker(&invtsc_mig_blocker
, &local_err
);
2218 error_report_err(local_err
);
    if (cpu->vmware_cpuid_freq
        /* Guests depend on 0x40000000 to detect this feature, so only expose
         * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V.) */
        && kvm_base == KVM_CPUID_SIGNATURE
        /* TSC clock must be stable and known for this feature. */
        && tsc_is_stable_and_known(env)) {

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | 0x10;
        c->eax = env->tsc_khz;
        c->ebx = env->apic_bus_freq / 1000; /* Hz to kHz */
        c->ecx = c->edx = 0;

        c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
        c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
    }
    cpuid_data.cpuid.nent = cpuid_i;

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        goto fail;
    }

    kvm_init_xsave(env);
    max_nested_state_len = kvm_max_nested_state_length();
    if (max_nested_state_len > 0) {
        assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));

        if (cpu_has_vmx(env) || cpu_has_svm(env)) {
            env->nested_state = g_malloc0(max_nested_state_len);
            env->nested_state->size = max_nested_state_len;

            kvm_init_nested_state(env);
        }
    }
    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
        has_msr_tsc_aux = false;
    }

    kvm_init_msrs(cpu);

    return 0;

 fail:
    migrate_del_blocker(&invtsc_mig_blocker);

    return r;
}
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    g_free(env->xsave_buf);

    g_free(cpu->kvm_msr_buf);
    cpu->kvm_msr_buf = NULL;

    g_free(env->nested_state);
    env->nested_state = NULL;

    qemu_del_vm_change_state_handler(cpu->vmsentry);

    return 0;
}
void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }

    /* enabled by default */
    env->poll_control_msr = 1;

    kvm_init_nested_state(env);

    sev_es_set_reset_vector(CPU(cpu));
}
void kvm_arch_after_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;

    /*
     * Reset SynIC after all other devices have been reset to let them remove
     * their SINT routes first.
     */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
            env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
        }

        hyperv_x86_synic_reset(cpu);
    }
}
void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state. */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}
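
/*
 * Both MSR list ioctls used below follow the same two-call protocol: a
 * first call issued with nmsrs == 0 fails with -E2BIG but fills in the
 * number of MSRs the kernel knows about; the caller then allocates a
 * buffer of that size and repeats the call to fetch the actual indices.
 */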
static int kvm_get_supported_feature_msrs(KVMState *s)
{
    int ret = 0;

    if (kvm_feature_msrs != NULL) {
        return 0;
    }

    if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
        return 0;
    }

    struct kvm_msr_list msr_list;

    msr_list.nmsrs = 0;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
    if (ret < 0 && ret != -E2BIG) {
        error_report("Fetch KVM feature MSR list failed: %s",
                     strerror(-ret));
        return ret;
    }

    assert(msr_list.nmsrs > 0);
    kvm_feature_msrs = g_malloc0(sizeof(msr_list) +
                 msr_list.nmsrs * sizeof(msr_list.indices[0]));

    kvm_feature_msrs->nmsrs = msr_list.nmsrs;
    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);

    if (ret < 0) {
        error_report("Fetch KVM feature MSR list failed: %s",
                     strerror(-ret));
        g_free(kvm_feature_msrs);
        kvm_feature_msrs = NULL;
        return ret;
    }

    return 0;
}
static int kvm_get_supported_msrs(KVMState *s)
{
    int ret = 0;
    struct kvm_msr_list msr_list, *kvm_msr_list;

    /*
     * Obtain MSR list from KVM. These are the MSRs that we must
     * save/restore.
     */
    msr_list.nmsrs = 0;
    ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
    if (ret < 0 && ret != -E2BIG) {
        return ret;
    }
    /*
     * Old kernel modules had a bug and could write beyond the provided
     * memory. Allocate at least a safe amount of 1K.
     */
    kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                       msr_list.nmsrs *
                                       sizeof(msr_list.indices[0])));

    kvm_msr_list->nmsrs = msr_list.nmsrs;
    ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
    if (ret >= 0) {
        int i;

        for (i = 0; i < kvm_msr_list->nmsrs; i++) {
            switch (kvm_msr_list->indices[i]) {
            case MSR_STAR:
                has_msr_star = true;
                break;
            case MSR_VM_HSAVE_PA:
                has_msr_hsave_pa = true;
                break;
            case MSR_TSC_AUX:
                has_msr_tsc_aux = true;
                break;
            case MSR_TSC_ADJUST:
                has_msr_tsc_adjust = true;
                break;
            case MSR_IA32_TSCDEADLINE:
                has_msr_tsc_deadline = true;
                break;
            case MSR_IA32_SMBASE:
                has_msr_smbase = true;
                break;
            case MSR_SMI_COUNT:
                has_msr_smi_count = true;
                break;
            case MSR_IA32_MISC_ENABLE:
                has_msr_misc_enable = true;
                break;
            case MSR_IA32_BNDCFGS:
                has_msr_bndcfgs = true;
                break;
            case MSR_IA32_XSS:
                has_msr_xss = true;
                break;
            case MSR_IA32_UMWAIT_CONTROL:
                has_msr_umwait = true;
                break;
            case HV_X64_MSR_CRASH_CTL:
                has_msr_hv_crash = true;
                break;
            case HV_X64_MSR_RESET:
                has_msr_hv_reset = true;
                break;
            case HV_X64_MSR_VP_INDEX:
                has_msr_hv_vpindex = true;
                break;
            case HV_X64_MSR_VP_RUNTIME:
                has_msr_hv_runtime = true;
                break;
            case HV_X64_MSR_SCONTROL:
                has_msr_hv_synic = true;
                break;
            case HV_X64_MSR_STIMER0_CONFIG:
                has_msr_hv_stimer = true;
                break;
            case HV_X64_MSR_TSC_FREQUENCY:
                has_msr_hv_frequencies = true;
                break;
            case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
                has_msr_hv_reenlightenment = true;
                break;
            case HV_X64_MSR_SYNDBG_OPTIONS:
                has_msr_hv_syndbg_options = true;
                break;
            case MSR_IA32_SPEC_CTRL:
                has_msr_spec_ctrl = true;
                break;
            case MSR_AMD64_TSC_RATIO:
                has_tsc_scale_msr = true;
                break;
            case MSR_IA32_TSX_CTRL:
                has_msr_tsx_ctrl = true;
                break;
            case MSR_VIRT_SSBD:
                has_msr_virt_ssbd = true;
                break;
            case MSR_IA32_ARCH_CAPABILITIES:
                has_msr_arch_capabs = true;
                break;
            case MSR_IA32_CORE_CAPABILITY:
                has_msr_core_capabs = true;
                break;
            case MSR_IA32_PERF_CAPABILITIES:
                has_msr_perf_capabs = true;
                break;
            case MSR_IA32_VMX_VMFUNC:
                has_msr_vmx_vmfunc = true;
                break;
            case MSR_IA32_UCODE_REV:
                has_msr_ucode_rev = true;
                break;
            case MSR_IA32_VMX_PROCBASED_CTLS2:
                has_msr_vmx_procbased_ctls2 = true;
                break;
            case MSR_IA32_PKRS:
                has_msr_pkrs = true;
                break;
            }
        }
    }

    g_free(kvm_msr_list);

    return ret;
}
static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr,
                                        uint64_t *val)
{
    CPUState *cs = CPU(cpu);

    *val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
    *val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */

    return true;
}
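
/*
 * Example: a guest configured with 4 cores and 2 threads per core reads
 * MSR_CORE_THREAD_COUNT as 0x00040008: 8 threads in bits 15..0 and 4
 * cores in bits 31..16.
 */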
static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;

static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container... */
    memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /* ... with two regions inside: normal system memory with low
     * priority, and...
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    /* ... SMRAM with higher priority */
    memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
    memory_region_set_enabled(smram, true);

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1, "kvm-smram");
}
int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;
    Error *local_err = NULL;

    /*
     * Initialize SEV context, if required
     *
     * If no memory encryption is requested (ms->cgs == NULL) this is
     * a no-op.
     *
     * It's also a no-op if a non-SEV confidential guest support
     * mechanism is selected.  SEV is the only mechanism available to
     * select on x86 at present, so this doesn't arise, but if new
     * mechanisms are supported in future (e.g. TDX), they'll need
     * their own initialization either here or elsewhere.
     */
    ret = sev_kvm_init(ms->cgs, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        return ret;
    }

    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        error_report("kvm: KVM_CAP_IRQ_ROUTING not supported by KVM");
        return -ENOTSUP;
    }

    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
    has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;

    hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);

    has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
    if (has_exception_payload) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
        if (ret < 0) {
            error_report("kvm: Failed to enable exception payload cap: %s",
                         strerror(-ret));
            return ret;
        }
    }

    has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT);
    if (has_triple_fault_event) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
        if (ret < 0) {
            error_report("kvm: Failed to enable triple fault event cap: %s",
                         strerror(-ret));
            return ret;
        }
    }

    if (s->xen_version) {
#ifdef CONFIG_XEN_EMU
        if (!object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)) {
            error_report("kvm: Xen support only available in PC machine");
            return -ENOTSUP;
        }
        /* hyperv_enabled() doesn't work yet. */
        uint32_t msr = XEN_HYPERCALL_MSR;
        ret = kvm_xen_init(s, msr);
        if (ret < 0) {
            return ret;
        }
#else
        error_report("kvm: Xen support not enabled in qemu");
        return -ENOTSUP;
#endif
    }

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    kvm_get_supported_feature_msrs(s);

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }

    shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }

    if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
        object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
        x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
        smram_machine_done.notify = register_smram_listener;
        qemu_add_machine_init_done_notifier(&smram_machine_done);
    }

    if (enable_cpu_pm) {
        int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
/* Work around for kernel header with a typo. TODO: fix header and drop. */
#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
#endif
        if (disable_exits) {
            disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
                              KVM_X86_DISABLE_EXITS_HLT |
                              KVM_X86_DISABLE_EXITS_PAUSE |
                              KVM_X86_DISABLE_EXITS_CSTATE);
        }

        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
                                disable_exits);
        if (ret < 0) {
            error_report("kvm: guest stopping CPU not supported: %s",
                         strerror(-ret));
        }
    }

    if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) {
        X86MachineState *x86ms = X86_MACHINE(ms);

        if (x86ms->bus_lock_ratelimit > 0) {
            ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
            if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
                error_report("kvm: bus lock detection unsupported");
                return -ENOTSUP;
            }
            ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
                                    KVM_BUS_LOCK_DETECTION_EXIT);
            if (ret < 0) {
                error_report("kvm: Failed to enable bus lock detection cap: %s",
                             strerror(-ret));
                return ret;
            }
            ratelimit_init(&bus_lock_ratelimit_ctrl);
            ratelimit_set_speed(&bus_lock_ratelimit_ctrl,
                                x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME);
        }
    }

    if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE &&
        kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
        uint64_t notify_window_flags =
            ((uint64_t)s->notify_window << 32) |
            KVM_X86_NOTIFY_VMEXIT_ENABLED |
            KVM_X86_NOTIFY_VMEXIT_USER;
        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
                                notify_window_flags);
        if (ret < 0) {
            error_report("kvm: Failed to enable notify vmexit cap: %s",
                         strerror(-ret));
            return ret;
        }
    }

    if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
        bool r;

        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
                                KVM_MSR_EXIT_REASON_FILTER);
        if (ret) {
            error_report("Could not enable user space MSRs: %s",
                         strerror(-ret));
            exit(1);
        }

        r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
                           kvm_rdmsr_core_thread_count, NULL);
        if (!r) {
            error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s",
                         strerror(-ret));
            exit(1);
        }
    }

    return 0;
}
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}
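
/*
 * One helper covers both transfer directions so the register list in
 * kvm_getput_regs() below only has to be written once; 'set' selects
 * QEMU-to-KVM (non-zero) or KVM-to-QEMU (zero).
 */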
static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}
static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
    }
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}
static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    void *xsave = env->xsave_buf;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }
    x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
}
static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs = {};

    if (!has_xcrs) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}
static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    /*
     * The interrupt_bitmap is ignored because KVM_SET_SREGS is
     * always followed by KVM_SET_VCPU_EVENTS.
     */
    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
static int kvm_put_sregs2(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs2 sregs;
    int i;

    sregs.flags = 0;

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    if (env->pdptrs_valid) {
        for (i = 0; i < 4; i++) {
            sregs.pdptrs[i] = env->pdptrs[i];
        }
        sregs.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS2, &sregs);
}
static void kvm_msr_buf_reset(X86CPU *cpu)
{
    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
}
static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
{
    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];

    assert((void *)(entry + 1) <= limit);

    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
    msrs->nmsrs++;
}
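
/*
 * Typical batching pattern (a sketch of how the helpers here are used):
 *
 *     kvm_msr_buf_reset(cpu);
 *     kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
 *     kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
 *     kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
 *
 * This amortizes one ioctl over many MSRs instead of issuing one ioctl
 * per MSR.
 */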
static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
{
    kvm_msr_buf_reset(cpu);
    kvm_msr_entry_add(cpu, index, value);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value)
{
    int ret;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {
        .info.nmsrs = 1,
        .entries[0].index = index,
    };

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }
    assert(ret == 1);
    *value = msr_data.entries[0].data;
    return ret;
}
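
/*
 * KVM_GET_MSRS returns the number of MSR entries it processed, so for a
 * single-entry buffer a return value of 1 means success; callers below
 * therefore check 'ret == 1' rather than 'ret == 0'.
 */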
void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
{
    int ret;

    ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
    assert(ret == 1);
}
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int ret;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}
/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    int ret;

    if (!has_msr_feature_control) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
                          cpu->env.msr_ia32_feature_control);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}
static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
{
    uint32_t default1, can_be_one, can_be_zero;
    uint32_t must_be_one;

    switch (index) {
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
        default1 = 0x00000016;
        break;
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
        default1 = 0x0401e172;
        break;
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
        default1 = 0x000011ff;
        break;
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        default1 = 0x00036dff;
        break;
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        default1 = 0;
        break;
    default:
        abort();
    }

    /* If a feature bit is set, the control can be either set or clear.
     * Otherwise the value is limited to either 0 or 1 by default1.
     */
    can_be_one = features | default1;
    can_be_zero = features | ~default1;
    must_be_one = ~can_be_zero;

    /*
     * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
     * Bit 32:63 -> 1 if the control bit can be one.
     */
    return must_be_one | (((uint64_t)can_be_one) << 32);
}
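
/*
 * Worked example: for MSR_IA32_VMX_TRUE_PINBASED_CTLS with features == 0,
 * default1 is 0x00000016, so can_be_one == 0x16, can_be_zero == ~0x16 and
 * must_be_one == 0x16; the function returns 0x0000001600000016, i.e. only
 * the default1 bits are settable and they must be set.
 */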
static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
{
    uint64_t kvm_vmx_basic =
        kvm_arch_get_supported_msr_feature(kvm_state,
                                           MSR_IA32_VMX_BASIC);

    if (!kvm_vmx_basic) {
        /* If the kernel doesn't support VMX feature (kvm_intel.nested=0),
         * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
         */
        return;
    }

    uint64_t kvm_vmx_misc =
        kvm_arch_get_supported_msr_feature(kvm_state,
                                           MSR_IA32_VMX_MISC);
    uint64_t kvm_vmx_ept_vpid =
        kvm_arch_get_supported_msr_feature(kvm_state,
                                           MSR_IA32_VMX_EPT_VPID_CAP);

    /*
     * If the guest is 64-bit, a value of 1 is allowed for the host address
     * space size vmexit control.
     */
    uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
        ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;

    /*
     * Bits 0-30, 32-44 and 50-53 come from the host. KVM should
     * not change them for backwards compatibility.
     */
    uint64_t fixed_vmx_basic = kvm_vmx_basic &
        (MSR_VMX_BASIC_VMCS_REVISION_MASK |
         MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
         MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);

    /*
     * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can
     * change in the future but are always zero for now, clear them to be
     * future proof. Bits 32-63 in theory could change, though KVM does
     * not support dual-monitor treatment and probably never will; mask
     * them out as well.
     */
    uint64_t fixed_vmx_misc = kvm_vmx_misc &
        (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
         MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);

    /*
     * EPT memory types should not change either, so we do not bother
     * adding features for them.
     */
    uint64_t fixed_vmx_ept_mask =
            (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
             MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
    uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;

    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
                                         f[FEAT_VMX_PROCBASED_CTLS]));
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
                                         f[FEAT_VMX_PINBASED_CTLS]));
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
                                         f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
                                         f[FEAT_VMX_ENTRY_CTLS]));
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
                      make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
                                         f[FEAT_VMX_SECONDARY_CTLS]));
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
                      f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
                      f[FEAT_VMX_BASIC] | fixed_vmx_basic);
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
                      f[FEAT_VMX_MISC] | fixed_vmx_misc);
    if (has_msr_vmx_vmfunc) {
        kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
    }

    /*
     * Just to be safe, write these with constant values. The CRn_FIXED1
     * MSRs are generated by KVM based on the vCPU's CPUID.
     */
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
                      CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
    kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
                      CR4_VMXE_MASK);

    if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) {
        /* TSC multiplier (0x2032).  */
        kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32);
    } else {
        /* Preemption timer (0x482E).  */
        kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E);
    }
}
static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f)
{
    uint64_t kvm_perf_cap =
        kvm_arch_get_supported_msr_feature(kvm_state,
                                           MSR_IA32_PERF_CAPABILITIES);

    if (kvm_perf_cap) {
        kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES,
                          kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]);
    }
}
static int kvm_buf_set_msrs(X86CPU *cpu)
{
    int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
                     (uint32_t)e->index, (uint64_t)e->data);
        return -1;
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    return 0;
}
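
/*
 * KVM_SET_MSRS stops at the first MSR the kernel refuses to write and
 * returns how many entries it did accept, so entries[ret] above is exactly
 * the offending MSR that gets reported.
 */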
static void kvm_init_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    kvm_msr_buf_reset(cpu);
    if (has_msr_arch_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
                          env->features[FEAT_ARCH_CAPABILITIES]);
    }

    if (has_msr_core_capabs) {
        kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
                          env->features[FEAT_CORE_CAPABILITY]);
    }

    if (has_msr_perf_capabs && cpu->enable_pmu) {
        kvm_msr_entry_add_perf(cpu, env->features);
    }

    if (has_msr_ucode_rev) {
        kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
    }

    /*
     * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
     * all kernels with MSR features should have them.
     */
    if (kvm_feature_msrs && cpu_has_vmx(env)) {
        kvm_msr_entry_add_vmx(cpu, env->features);
    }

    assert(kvm_buf_set_msrs(cpu) == 0);
}
static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    int i;
    int ret;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
    }
    if (has_msr_pkrs) {
        kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
    }
    if (has_msr_umwait) {
        kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
    }
    if (has_tsc_scale_msr) {
        kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr);
    }

    if (has_msr_tsx_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
        kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
    }
#endif
    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback. Limit them to reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
        kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
            kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
            kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
            kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
            kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
        }

        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
            kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
        }

        if (has_architectural_pmu_version > 0) {
            if (has_architectural_pmu_version > 1) {
                /* Stop the counter.  */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            }

            /* Set the counter values.  */
            for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            if (has_architectural_pmu_version > 1) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
                                  env->msr_global_status);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                                  env->msr_global_ovf_ctrl);

                /* Now start the PMU.  */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
                                  env->msr_fixed_ctr_ctrl);
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
                                  env->msr_global_ctrl);
            }
        }
        /*
         * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
         * only sync them to KVM on the first cpu
         */
        if (current_cpu == first_cpu) {
            if (has_msr_hv_hypercall) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
                                  env->msr_hv_guest_os_id);
                kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
                                  env->msr_hv_hypercall);
            }
            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
                                  env->msr_hv_tsc);
            }
            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
                                  env->msr_hv_reenlightenment_control);
                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
                                  env->msr_hv_tsc_emulation_control);
                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
                                  env->msr_hv_tsc_emulation_status);
            }
#ifdef CONFIG_SYNDBG
            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) &&
                has_msr_hv_syndbg_options) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS,
                                  hyperv_syndbg_query_options());
            }
#endif
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }
        if (has_msr_hv_crash) {
            int j;

            for (j = 0; j < HV_CRASH_PARAMS; j++)
                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);

            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
        }
        if (has_msr_hv_runtime) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
            && hv_vpindex_settable) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
                              hyperv_vp_index(CPU(cpu)));
        }
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
            int j;

            kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);

            kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
                              env->msr_hv_synic_control);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
                              env->msr_hv_synic_evt_page);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
                              env->msr_hv_synic_msg_page);

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
                                  env->msr_hv_synic_sint[j]);
            }
        }
        if (has_msr_hv_stimer) {
            int j;

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
                                  env->msr_hv_stimer_config[j]);
            }

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
                                  env->msr_hv_stimer_count[j]);
            }
        }
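
        /*
         * The '+ j * 2' stride above reflects the Hyper-V MSR layout: each
         * synthetic timer owns a CONFIG/COUNT pair at consecutive addresses
         * (STIMER0_CONFIG, STIMER0_COUNT, STIMER1_CONFIG, ...).
         */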
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);

            kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
            kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                /* The CPU GPs if we write to a bit above the physical limit of
                 * the host CPU (and KVM emulates that)
                 */
                uint64_t mask = env->mtrr_var[i].mask;
                mask &= phys_mask;

                kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
                                  env->mtrr_var[i].base);
                kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
            }
        }
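
        /*
         * Example: with cpu->phys_bits == 36, phys_mask is 0xFFFFFFFFF, so
         * any stale mask bits at or above bit 36 are cleared before the
         * write and the emulated #GP is avoided.
         */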
        if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
            int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
                                                        0x14, 1, R_EAX) & 0x7;

            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
                              env->msr_rtit_ctrl);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
                              env->msr_rtit_status);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
                              env->msr_rtit_output_base);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
                              env->msr_rtit_output_mask);
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
                              env->msr_rtit_cr3_match);
            for (i = 0; i < addr_num; i++) {
                kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
                                  env->msr_rtit_addrs[i]);
            }
        }

        if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
            kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0,
                              env->msr_ia32_sgxlepubkeyhash[0]);
            kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1,
                              env->msr_ia32_sgxlepubkeyhash[1]);
            kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2,
                              env->msr_ia32_sgxlepubkeyhash[2]);
            kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3,
                              env->msr_ia32_sgxlepubkeyhash[3]);
        }

        if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
            kvm_msr_entry_add(cpu, MSR_IA32_XFD,
                              env->msr_xfd);
            kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR,
                              env->msr_xfd_err);
        }

        if (kvm_enabled() && cpu->enable_pmu &&
            (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
            uint64_t depth;

            /*
             * Only migrate Arch LBR states when the host Arch LBR depth
             * equals that of source guest's, this is to avoid mismatch
             * of guest/host config for the msr hence avoid unexpected
             * misbehavior.
             */
            ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);

            if (ret == 1 && !!depth && depth == env->msr_lbr_depth) {
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);

                for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
                    if (!env->lbr_records[i].from) {
                        continue;
                    }
                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
                                      env->lbr_records[i].from);
                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
                                      env->lbr_records[i].to);
                    kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
                                      env->lbr_records[i].info);
                }
            }
        }

        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control. */
    }
    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    return kvm_buf_set_msrs(cpu);
}
static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}
static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    void *xsave = env->xsave_buf;
    int type, ret;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
    ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
    if (ret < 0) {
        return ret;
    }
    x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len);

    return 0;
}
static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}
static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    int ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /*
     * The interrupt_bitmap is ignored because KVM_GET_SREGS is
     * always preceded by KVM_GET_VCPU_EVENTS.
     */

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
    x86_update_hflags(env);

    return 0;
}
static int kvm_get_sregs2(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs2 sregs;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS2, &sregs);
    if (ret < 0) {
        return ret;
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;

    if (env->pdptrs_valid) {
        for (i = 0; i < 4; i++) {
            env->pdptrs[i] = sregs.pdptrs[i];
        }
    }

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
    x86_update_hflags(env);

    return 0;
}
static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i;
    uint64_t mtrr_top_bits;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
    kvm_msr_entry_add(cpu, MSR_PAT, 0);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, 0);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
    }
    if (has_msr_smi_count) {
        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
    }
    if (has_msr_feature_control) {
        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
    }
    if (has_msr_pkrs) {
        kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
    }
    if (has_msr_umwait) {
        kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
    }
    if (has_msr_spec_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
    }
    if (has_tsc_scale_msr) {
        kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0);
    }

    if (has_msr_tsx_ctrl) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
    }
    if (has_msr_virt_ssbd) {
        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
    }
    if (!env->tsc_valid) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
    }
#endif
    kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
        kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
    }
    if (has_architectural_pmu_version > 0) {
        if (has_architectural_pmu_version > 1) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
        }
        for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
        }
        for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
        }
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
        }
    }

    if (has_msr_hv_hypercall) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
    }
    if (has_msr_hv_syndbg_options) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0);
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_CRASH_PARAMS; j++) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
        }
    }
    if (has_msr_hv_runtime) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
    }
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t msr;

        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (has_msr_hv_stimer) {
        uint32_t msr;

        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
             msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
        int addr_num =
            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;

        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
        for (i = 0; i < addr_num; i++) {
            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
        }
    }

    if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
    }

    if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
        kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0);
        kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
    }

    if (kvm_enabled() && cpu->enable_pmu &&
        (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
        uint64_t depth;

        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
        if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
            kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);

            for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
                kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
            }
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to get MSR 0x%" PRIx32,
                     (uint32_t)e->index);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a  10..0: must be zero
     * b  11   : valid bit
     * c n-1.12: actual mask bits
     * d  51..n: reserved must be zero
     * e  63.52: reserved must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52. We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading. It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to.
     */
    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }
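
    /*
     * Example: with cpu->phys_bits == 40, mtrr_top_bits covers bits 40..51,
     * i.e. 0x000FFF0000000000; those bits are ORed into each variable MTRR
     * mask below so the value stays valid on a destination with larger 'n'.
     */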
4092 for (i
= 0; i
< ret
; i
++) {
4093 uint32_t index
= msrs
[i
].index
;
4095 case MSR_IA32_SYSENTER_CS
:
4096 env
->sysenter_cs
= msrs
[i
].data
;
4098 case MSR_IA32_SYSENTER_ESP
:
4099 env
->sysenter_esp
= msrs
[i
].data
;
4101 case MSR_IA32_SYSENTER_EIP
:
4102 env
->sysenter_eip
= msrs
[i
].data
;
4105 env
->pat
= msrs
[i
].data
;
4108 env
->star
= msrs
[i
].data
;
4110 #ifdef TARGET_X86_64
4112 env
->cstar
= msrs
[i
].data
;
4114 case MSR_KERNELGSBASE
:
4115 env
->kernelgsbase
= msrs
[i
].data
;
4118 env
->fmask
= msrs
[i
].data
;
4121 env
->lstar
= msrs
[i
].data
;
4125 env
->tsc
= msrs
[i
].data
;
4128 env
->tsc_aux
= msrs
[i
].data
;
4130 case MSR_TSC_ADJUST
:
4131 env
->tsc_adjust
= msrs
[i
].data
;
4133 case MSR_IA32_TSCDEADLINE
:
4134 env
->tsc_deadline
= msrs
[i
].data
;
4136 case MSR_VM_HSAVE_PA
:
4137 env
->vm_hsave
= msrs
[i
].data
;
4139 case MSR_KVM_SYSTEM_TIME
:
4140 env
->system_time_msr
= msrs
[i
].data
;
4142 case MSR_KVM_WALL_CLOCK
:
4143 env
->wall_clock_msr
= msrs
[i
].data
;
4145 case MSR_MCG_STATUS
:
4146 env
->mcg_status
= msrs
[i
].data
;
4149 env
->mcg_ctl
= msrs
[i
].data
;
4151 case MSR_MCG_EXT_CTL
:
4152 env
->mcg_ext_ctl
= msrs
[i
].data
;
4154 case MSR_IA32_MISC_ENABLE
:
4155 env
->msr_ia32_misc_enable
= msrs
[i
].data
;
4157 case MSR_IA32_SMBASE
:
4158 env
->smbase
= msrs
[i
].data
;
4161 env
->msr_smi_count
= msrs
[i
].data
;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        case MSR_IA32_UMWAIT_CONTROL:
            env->umwait = msrs[i].data;
            break;
        case MSR_IA32_PKRS:
            env->pkrs = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_ASYNC_PF_INT:
            env->async_pf_int_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_KVM_POLL_CONTROL: {
            env->poll_control_msr = msrs[i].data;
            break;
        }
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
            env->msr_hv_reenlightenment_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
            env->msr_hv_tsc_emulation_control = msrs[i].data;
            break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
            env->msr_hv_tsc_emulation_status = msrs[i].data;
            break;
        case HV_X64_MSR_SYNDBG_OPTIONS:
            env->msr_hv_syndbg_options = msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        case MSR_IA32_SPEC_CTRL:
            env->spec_ctrl = msrs[i].data;
            break;
        case MSR_AMD64_TSC_RATIO:
            env->amd_tsc_scale_msr = msrs[i].data;
            break;
        case MSR_IA32_TSX_CTRL:
            env->tsx_ctrl = msrs[i].data;
            break;
        case MSR_VIRT_SSBD:
            env->virt_ssbd = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CTL:
            env->msr_rtit_ctrl = msrs[i].data;
            break;
        case MSR_IA32_RTIT_STATUS:
            env->msr_rtit_status = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_BASE:
            env->msr_rtit_output_base = msrs[i].data;
            break;
        case MSR_IA32_RTIT_OUTPUT_MASK:
            env->msr_rtit_output_mask = msrs[i].data;
            break;
        case MSR_IA32_RTIT_CR3_MATCH:
            env->msr_rtit_cr3_match = msrs[i].data;
            break;
        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
            env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
            break;
        case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
            env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] =
                           msrs[i].data;
            break;
        case MSR_IA32_XFD:
            env->msr_xfd = msrs[i].data;
            break;
        case MSR_IA32_XFD_ERR:
            env->msr_xfd_err = msrs[i].data;
            break;
        case MSR_ARCH_LBR_CTL:
            env->msr_lbr_ctl = msrs[i].data;
            break;
        case MSR_ARCH_LBR_DEPTH:
            env->msr_lbr_depth = msrs[i].data;
            break;
        case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
            env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
            break;
        case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
            env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
            break;
        case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
            env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
            break;
        }
    }

    return 0;
}
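/*
 * MP (multi-processing) state tracks whether the vCPU is runnable, halted,
 * in INIT, or waiting for SIPI. The two helpers below mirror
 * cpu->env.mp_state into and out of the kernel via KVM_SET_MP_STATE and
 * KVM_GET_MP_STATE.
 */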
static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}
static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}
static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}
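/*
 * Pending exception/interrupt/NMI/SMI state is transferred to KVM as a
 * single kvm_vcpu_events structure. Bits in events.flags declare which
 * optional fields (exception payload, SMM state, triple fault) the kernel
 * should consume; state whose capability was not detected is left zero.
 */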
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.flags = 0;

    if (has_exception_payload) {
        events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        events.exception.pending = env->exception_pending;
        events.exception_has_payload = env->exception_has_payload;
        events.exception_payload = env->exception_payload;
    }
    events.exception.nr = env->exception_nr;
    events.exception.injected = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request. */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        /* Stop SMI delivery on old machine types to avoid a reboot
         * on an inward migration of an old VM.
         */
        if (!cpu->kvm_no_smi_migration) {
            events.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
            events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }
    }

    if (has_triple_fault_event) {
        events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
        events.triple_fault.pending = env->triple_fault_pending;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
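/*
 * Read-back counterpart of kvm_put_vcpu_events(). Note that the exception
 * payload fields are explicitly cleared when KVM did not report
 * KVM_VCPUEVENT_VALID_PAYLOAD, and that SMM and triple-fault state is only
 * touched when the corresponding validity flag is set.
 */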
static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
        env->exception_pending = events.exception.pending;
        env->exception_has_payload = events.exception_has_payload;
        env->exception_payload = events.exception_payload;
    } else {
        env->exception_pending = 0;
        env->exception_has_payload = false;
    }
    env->exception_injected = events.exception.injected;
    env->exception_nr =
        (env->exception_pending || env->exception_injected) ?
            events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
        env->triple_fault_pending = events.triple_fault.pending;
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}
static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_nr == EXCP01_DB) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == EXCP03_INT3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        kvm_reset_exception(env);
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}
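/*
 * DR0-DR3 plus DR6/DR7 are exchanged with KVM via KVM_SET_DEBUGREGS and
 * KVM_GET_DEBUGREGS. On read-back, DR4/DR5 are aliased to DR6/DR7,
 * matching their architectural behaviour when CR4.DE is clear.
 */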
static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    memset(&dbgregs, 0, sizeof(dbgregs));
    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}
static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}
static int kvm_put_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();

    if (!env->nested_state) {
        return 0;
    }

    /*
     * Copy flags that are affected by reset from env->hflags and env->hflags2.
     */
    if (env->hflags & HF_GUEST_MASK) {
        env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
    } else {
        env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
    }

    /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
    if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
        env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
    } else {
        env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
    }

    assert(env->nested_state->size <= max_nested_state_len);
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
}
static int kvm_get_nested_state(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int max_nested_state_len = kvm_max_nested_state_length();
    int ret;

    if (!env->nested_state) {
        return 0;
    }

    /*
     * It is possible that migration restored a smaller size into
     * nested_state->hdr.size than what our kernel supports.
     * We preserve the migration origin's nested_state->hdr.size for
     * the call to KVM_SET_NESTED_STATE, but want our next call to
     * KVM_GET_NESTED_STATE to use the maximum size our kernel supports.
     */
    env->nested_state->size = max_nested_state_len;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
    if (ret < 0) {
        return ret;
    }

    /*
     * Copy flags that are affected by reset to env->hflags and env->hflags2.
     */
    if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
        env->hflags |= HF_GUEST_MASK;
    } else {
        env->hflags &= ~HF_GUEST_MASK;
    }

    /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */
    if (cpu_has_svm(env)) {
        if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
            env->hflags2 |= HF2_GIF_MASK;
        } else {
            env->hflags2 &= ~HF2_GIF_MASK;
        }
    }

    return ret;
}
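/*
 * Top-level "put" path: pushes all of QEMU's vCPU state into KVM. The
 * ordering below is deliberate (feature-control MSR and sregs before
 * nested state, old-style MCE injection before MSRs, the guest-debug
 * workarounds last); see the inline comments at each step.
 */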
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    /*
     * Put MSR_IA32_FEATURE_CONTROL first, this ensures the VM gets out of VMX
     * root operation upon vCPU reset. kvm_put_msr_feature_control() should
     * also precede kvm_put_nested_state() when 'real' nested state is set.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    /* must be before kvm_put_nested_state so that EFER.SVME is set */
    ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_nested_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /* We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

#ifdef CONFIG_XEN_EMU
    if (xen_mode == XEN_EMULATE && level == KVM_PUT_FULL_STATE) {
        ret = kvm_put_xen_state(cpu);
        if (ret < 0) {
            return ret;
        }
    }
#endif

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
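/*
 * Top-level "get" path: mirrors kvm_arch_put_registers() in the opposite
 * direction. Events and MP state are fetched first because KVM_GET_MPSTATE
 * can modify CS and RIP; every step funnels through the "out" label so the
 * MPX-related hflags are resynchronized even on error.
 */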
int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    /*
     * KVM_GET_MPSTATE can modify CS and RIP, call it before
     * KVM_GET_REGS and KVM_GET_SREGS.
     */
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_nested_state(cpu);
    if (ret < 0) {
        goto out;
    }
#ifdef CONFIG_XEN_EMU
    if (xen_mode == XEN_EMULATE) {
        ret = kvm_get_xen_state(cs);
        if (ret < 0) {
            goto out;
        }
    }
#endif
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI and SMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit. This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}
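/*
 * Bus-lock throttling: when a KVM_RUN exit reports KVM_RUN_X86_BUS_LOCK,
 * the offending vCPU sleeps for the delay computed from
 * bus_lock_ratelimit_ctrl (configured elsewhere at accelerator init), so
 * a guest hammering split-lock transactions cannot monopolize the bus.
 */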
static void kvm_rate_limit_on_bus_lock(void)
{
    uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1);

    if (delay_ns) {
        g_usleep(delay_ns / SCALE_US);
    }
}
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }
    if (run->flags & KVM_RUN_X86_BUS_LOCK) {
        kvm_rate_limit_on_bus_lock();
    }

#ifdef CONFIG_XEN_EMU
    /*
     * If the callback is asserted as a GSI (or PCI INTx) then check if
     * vcpu_info->evtchn_upcall_pending has been cleared, and deassert
     * the callback IRQ if so. Ideally we could hook into the PIC/IOAPIC
     * EOI and only resample then, exactly how the VFIO eventfd pairs
     * are designed to work for level triggered interrupts.
     */
    if (x86_cpu->env.xen_callback_asserted) {
        kvm_xen_maybe_deassert_callback(cpu);
    }
#endif

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_nr == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}
static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}
static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
        return -EINVAL;
    }
    if (int3 != 0xcc) {
        return -EINVAL;
    }
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
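/*
 * KVM_EXIT_DEBUG handling: decode DR6/DR7 from the debug exit to decide
 * whether the #DB/#BP belongs to the host-side debugger (single-step, one
 * of our hardware breakpoints/watchpoints, or a software breakpoint we
 * planted) or must be re-queued for injection into the guest.
 */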
static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == EXCP01_DB) {
        if (arch_info->dr6 & DR6_BS) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_nr == -1);

        /* pass to guest */
        kvm_queue_exception(env, arch_info->exception,
                            arch_info->exception == EXCP01_DB,
                            arch_info->dr6);
        env->has_error_code = 0;
    }

    return ret;
}
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
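/*
 * MSR filtering: each registered handler is translated into a one-MSR
 * kvm_msr_filter_range with a zero permission bitmap, so guest accesses
 * to that MSR trap to userspace as KVM_EXIT_X86_RDMSR/WRMSR exits and are
 * serviced by the rdmsr/wrmsr callbacks registered via kvm_filter_msr().
 */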
static bool kvm_install_msr_filters(KVMState *s)
{
    uint64_t zero = 0;
    struct kvm_msr_filter filter = {
        .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
    };
    int r, i, j = 0;

    for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) {
        KVMMSRHandlers *handler = &msr_handlers[i];
        if (handler->msr) {
            struct kvm_msr_filter_range *range = &filter.ranges[j++];

            *range = (struct kvm_msr_filter_range) {
                .flags = 0,
                .nmsrs = 1,
                .base = handler->msr,
                .bitmap = (__u8 *)&zero,
            };

            if (handler->rdmsr) {
                range->flags |= KVM_MSR_FILTER_READ;
            }

            if (handler->wrmsr) {
                range->flags |= KVM_MSR_FILTER_WRITE;
            }
        }
    }

    r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
    if (r) {
        return false;
    }

    return true;
}
bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
                    QEMUWRMSRHandler *wrmsr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
        if (!msr_handlers[i].msr) {
            msr_handlers[i] = (KVMMSRHandlers) {
                .msr = msr,
                .rdmsr = rdmsr,
                .wrmsr = wrmsr,
            };

            if (!kvm_install_msr_filters(s)) {
                msr_handlers[i] = (KVMMSRHandlers) { };
                return false;
            }

            return true;
        }
    }

    return false;
}
static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
{
    int i;
    bool r;

    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
        KVMMSRHandlers *handler = &msr_handlers[i];
        if (run->msr.index == handler->msr) {
            if (handler->rdmsr) {
                r = handler->rdmsr(cpu, handler->msr,
                                   (uint64_t *)&run->msr.data);
                run->msr.error = r ? 0 : 1;
                return 0;
            }
        }
    }

    assert(false);
}

static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
{
    int i;
    bool r;

    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
        KVMMSRHandlers *handler = &msr_handlers[i];
        if (run->msr.index == handler->msr) {
            if (handler->wrmsr) {
                r = handler->wrmsr(cpu, handler->msr, run->msr.data);
                run->msr.error = r ? 0 : 1;
                return 0;
            }
        }
    }

    assert(false);
}
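/*
 * SGX provisioning: granting the VM access to the PROVISIONKEY attribute
 * requires passing an open fd for /dev/sgx_provision to
 * KVM_CAP_SGX_ATTRIBUTE. The result is memoized (see the MEMORIZE use
 * below) since it cannot change at runtime.
 */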
static bool has_sgx_provisioning;

static bool __kvm_enable_sgx_provisioning(KVMState *s)
{
    int fd, ret;

    if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) {
        return false;
    }

    fd = qemu_open_old("/dev/sgx_provision", O_RDONLY);
    if (fd < 0) {
        return false;
    }

    ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd);
    if (ret) {
        error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret));
        exit(1);
    }

    close(fd);
    return true;
}

bool kvm_enable_sgx_provisioning(KVMState *s)
{
    return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning);
}
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}
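/*
 * Main dispatcher for KVM exits that the generic accelerator code does not
 * handle itself. Handlers return 0 to resume the guest, a positive value
 * (e.g. EXCP_HLT, EXCP_DEBUG) to drop back into QEMU's vCPU loop, or a
 * negative value to signal a fatal error.
 */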
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;
    bool ctx_invalid;
    char str[256];
    KVMState *state;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    case KVM_EXIT_X86_BUS_LOCK:
        /* already handled in kvm_arch_post_run */
        ret = 0;
        break;
    case KVM_EXIT_NOTIFY:
        ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID);
        state = KVM_STATE(current_accel());
        sprintf(str, "Encountered a notify exit with %svalid context in"
                     " guest. The guest may be misbehaving;"
                     " please investigate.", ctx_invalid ? "in" : "");
        if (ctx_invalid ||
            state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) {
            warn_report("KVM internal error: %s", str);
            ret = -1;
        } else {
            warn_report_once("KVM: %s", str);
            ret = 0;
        }
        break;
    case KVM_EXIT_X86_RDMSR:
        /* We only enable MSR filtering, any other exit is bogus */
        assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
        ret = kvm_handle_rdmsr(cpu, run);
        break;
    case KVM_EXIT_X86_WRMSR:
        /* We only enable MSR filtering, any other exit is bogus */
        assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
        ret = kvm_handle_wrmsr(cpu, run);
        break;
#ifdef CONFIG_XEN_EMU
    case KVM_EXIT_XEN:
        ret = kvm_xen_handle_exit(cpu, &run->xen);
        break;
#endif
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}
void kvm_arch_init_irq_routing(KVMState *s)
{
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
           MSI routes for signaling interrupts to the local apics. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
        kvm_irqchip_commit_route_changes(&c);
    }
}
int kvm_arch_irqchip_create(KVMState *s)
{
    int ret;

    if (kvm_kernel_irqchip_split()) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}
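/*
 * KVM_FEATURE_MSI_EXT_DEST_ID lets a guest target more than 255 vCPUs
 * without interrupt remapping by carrying the high bits of the destination
 * APIC ID in otherwise-unused low bits of the MSI address. The swizzle
 * below moves those bits up into the high 32 bits of the address, where
 * the KVM extended destination ID convention expects them.
 */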
uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
{
    CPUX86State *env;
    uint64_t ext_id;

    if (!first_cpu) {
        return address;
    }
    env = &X86_CPU(first_cpu)->env;
    if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
        return address;
    }

    /*
     * If the remappable format bit is set, or the upper bits are
     * already set in address_hi, or the low extended bits aren't
     * there anyway, do nothing.
     */
    ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
    if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
        return address;
    }

    address &= ~ext_id;
    address |= ext_id << 35;
    return address;
}
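/*
 * Route fixup runs whenever an MSI route is installed in the kernel:
 * first give the vIOMMU (if any) a chance to remap the message, then let
 * Xen PIRQ emulation claim it, and finally apply the extended destination
 * ID swizzle above.
 */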
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);

        if (class->int_remap) {
            int ret;
            MSIMessage src, dst;

            src.address = route->u.msi.address_hi;
            src.address <<= VTD_MSI_ADDR_HI_SHIFT;
            src.address |= route->u.msi.address_lo;
            src.data = route->u.msi.data;

            ret = class->int_remap(iommu, &src, &dst, dev ?     \
                                   pci_requester_id(dev) :      \
                                   X86_IOMMU_SID_INVALID);
            if (ret) {
                trace_kvm_x86_fixup_msi_error(route->gsi);
                return 1;
            }

            /*
             * Handle an untranslated compatibility-format interrupt with
             * the extended destination ID in the low bits 11-5. */
            dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);

            route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
            route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
            route->u.msi.data = dst.data;
            return 0;
        }
    }

#ifdef CONFIG_XEN_EMU
    if (xen_mode == XEN_EMULATE) {
        int handled = xen_evtchn_translate_pirq_msi(route, address, data);

        /*
         * If it was a PIRQ and successfully routed (handled == 0) or it was
         * an error (handled < 0), return. If it wasn't a PIRQ, keep going.
         */
        if (handled <= 0) {
            return handled;
        }
    }
#endif

    address = kvm_swizzle_msi_ext_dest_id(address);
    route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
    route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
    return 0;
}
typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
    QLIST_HEAD_INITIALIZER(msi_route_list);
void kvm_update_msi_routes_all(void *private, bool global,
                               uint32_t index, uint32_t mask)
{
    int cnt = 0, vector;
    MSIRouteEntry *entry;
    MSIMessage msg;
    PCIDevice *dev;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        vector = entry->vector;
        dev = entry->dev;
        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
            msg = msix_get_message(dev, vector);
        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
            msg = msi_get_message(dev, vector);
        } else {
            /*
             * Either MSI/MSIX is disabled for the device, or the
             * specific message was masked out. Skip this one.
             */
            continue;
        }
        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only. */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* For the first time we do add route, add ourselves into
         * IOMMU's IEC notify list if needed. */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}
int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;
    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            g_free(entry);
            break;
        }
    }
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

bool kvm_has_waitpkg(void)
{
    return has_msr_umwait;
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return !sev_es_enabled();
}
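/*
 * Dynamic XSAVE components (e.g. AMX tile state) require explicit
 * per-process permission from the kernel before KVM will expose them.
 * ARCH_REQ_XCOMP_GUEST_PERM is requested bit by bit below for every
 * dynamic feature the configured CPU model wants.
 */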
#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025

void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
{
    KVMState *s = kvm_state;
    uint64_t supported;

    mask &= XSTATE_DYNAMIC_MASK;
    if (!mask) {
        return;
    }
    /*
     * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
     * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
     * about them already because they are not supported features.
     */
    supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
    supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
    mask &= supported;

    while (mask) {
        int bit = ctz64(mask);
        int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
        if (rc) {
            /*
             * Older kernel versions (< 5.17) do not support
             * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return
             * any dynamic feature from kvm_arch_get_supported_cpuid.
             */
            warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
                        "for feature bit %d", bit);
        }
        mask &= ~BIT_ULL(bit);
    }
}
static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    return s->notify_vmexit;
}

static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp)
{
    KVMState *s = KVM_STATE(obj);

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    s->notify_vmexit = value;
}
static void kvm_arch_get_notify_window(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->notify_window;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_arch_set_notify_window(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value;

    if (s->fd != -1) {
        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
        return;
    }

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    s->notify_window = value;
}
static void kvm_arch_get_xen_version(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint32_t value = s->xen_version;

    visit_type_uint32(v, name, &value, errp);
}

static void kvm_arch_set_xen_version(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint32_t value;

    visit_type_uint32(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_version = value;
    if (value && xen_mode == XEN_DISABLED) {
        xen_mode = XEN_EMULATE;
    }
}
static void kvm_arch_get_xen_gnttab_max_frames(Object *obj, Visitor *v,
                                               const char *name, void *opaque,
                                               Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint16_t value = s->xen_gnttab_max_frames;

    visit_type_uint16(v, name, &value, errp);
}

static void kvm_arch_set_xen_gnttab_max_frames(Object *obj, Visitor *v,
                                               const char *name, void *opaque,
                                               Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint16_t value;

    visit_type_uint16(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_gnttab_max_frames = value;
}
static void kvm_arch_get_xen_evtchn_max_pirq(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint16_t value = s->xen_evtchn_max_pirq;

    visit_type_uint16(v, name, &value, errp);
}

static void kvm_arch_set_xen_evtchn_max_pirq(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    Error *error = NULL;
    uint16_t value;

    visit_type_uint16(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    s->xen_evtchn_max_pirq = value;
}
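/*
 * The accelerator properties registered below are settable on the command
 * line before the accelerator is initialized. For example (assuming a
 * kernel with notify-exit support, and a QEMU built with Xen emulation),
 * something like:
 *
 *   -accel kvm,notify-vmexit=run,notify-window=0
 *   -accel kvm,xen-version=0x4000a,xen-evtchn-max-pirq=256
 */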
void kvm_arch_accel_class_init(ObjectClass *oc)
{
    object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption",
                                   &NotifyVmexitOption_lookup,
                                   kvm_arch_get_notify_vmexit,
                                   kvm_arch_set_notify_vmexit);
    object_class_property_set_description(oc, "notify-vmexit",
                                          "Enable notify VM exit");

    object_class_property_add(oc, "notify-window", "uint32",
                              kvm_arch_get_notify_window,
                              kvm_arch_set_notify_window,
                              NULL, NULL);
    object_class_property_set_description(oc, "notify-window",
                                          "Clock cycles without an event window "
                                          "after which a notification VM exit occurs");

    object_class_property_add(oc, "xen-version", "uint32",
                              kvm_arch_get_xen_version,
                              kvm_arch_set_xen_version,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-version",
                                          "Xen version to be emulated "
                                          "(in XENVER_version form "
                                          "e.g. 0x4000a for 4.10)");

    object_class_property_add(oc, "xen-gnttab-max-frames", "uint16",
                              kvm_arch_get_xen_gnttab_max_frames,
                              kvm_arch_set_xen_gnttab_max_frames,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-gnttab-max-frames",
                                          "Maximum number of grant table frames");

    object_class_property_add(oc, "xen-evtchn-max-pirq", "uint16",
                              kvm_arch_get_xen_evtchn_max_pirq,
                              kvm_arch_set_xen_evtchn_max_pirq,
                              NULL, NULL);
    object_class_property_set_description(oc, "xen-evtchn-max-pirq",
                                          "Maximum number of Xen PIRQs");
}
void kvm_set_max_apic_id(uint32_t max_apic_id)
{
    kvm_vm_enable_cap(kvm_state, KVM_CAP_MAX_VCPU_ID, 0, max_apic_id);
}