KVM: x86: Implement MSR_CORE_THREAD_COUNT MSR
[qemu.git] target/i386/kvm/kvm.c
/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
15 #include "qemu/osdep.h"
16 #include "qapi/qapi-events-run-state.h"
17 #include "qapi/error.h"
18 #include "qapi/visitor.h"
19 #include <sys/ioctl.h>
20 #include <sys/utsname.h>
21 #include <sys/syscall.h>
23 #include <linux/kvm.h>
24 #include "standard-headers/asm-x86/kvm_para.h"
26 #include "cpu.h"
27 #include "host-cpu.h"
28 #include "sysemu/sysemu.h"
29 #include "sysemu/hw_accel.h"
30 #include "sysemu/kvm_int.h"
31 #include "sysemu/runstate.h"
32 #include "kvm_i386.h"
33 #include "sev.h"
34 #include "hyperv.h"
35 #include "hyperv-proto.h"
37 #include "exec/gdbstub.h"
38 #include "qemu/host-utils.h"
39 #include "qemu/main-loop.h"
40 #include "qemu/config-file.h"
41 #include "qemu/error-report.h"
42 #include "qemu/memalign.h"
43 #include "hw/i386/x86.h"
44 #include "hw/i386/apic.h"
45 #include "hw/i386/apic_internal.h"
46 #include "hw/i386/apic-msidef.h"
47 #include "hw/i386/intel_iommu.h"
48 #include "hw/i386/x86-iommu.h"
49 #include "hw/i386/e820_memory_layout.h"
51 #include "hw/pci/pci.h"
52 #include "hw/pci/msi.h"
53 #include "hw/pci/msix.h"
54 #include "migration/blocker.h"
55 #include "exec/memattrs.h"
56 #include "trace.h"
58 #include CONFIG_DEVICES
//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
/* From arch/x86/kvm/lapic.h */
#define KVM_APIC_BUS_CYCLE_NS       1
#define KVM_APIC_BUS_FREQUENCY      (1000000000ULL / KVM_APIC_BUS_CYCLE_NS)

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096
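
/*
 * Sanity arithmetic for the sizing above, assuming the usual x86-64 Linux
 * UAPI layout where sizeof(struct kvm_msrs) == 8 and
 * sizeof(struct kvm_msr_entry) == 16:
 *
 *     8 + 255 * 16 = 4088 <= 4096
 *
 * so a single MSR_BUF_SIZE buffer fits a maximal KVM_GET_MSRS/KVM_SET_MSRS
 * batch with room to spare.
 */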
static void kvm_init_msrs(X86CPU *cpu);

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};
static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool hv_vpindex_settable;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_hv_frequencies;
static bool has_msr_hv_reenlightenment;
static bool has_msr_hv_syndbg_options;
static bool has_msr_xss;
static bool has_msr_umwait;
static bool has_msr_spec_ctrl;
static bool has_tsc_scale_msr;
static bool has_msr_tsx_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
static bool has_msr_core_capabs;
static bool has_msr_vmx_vmfunc;
static bool has_msr_ucode_rev;
static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;

static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
static uint32_t num_architectural_pmu_fixed_counters;

static int has_xsave;
static int has_xsave2;
static int has_xcrs;
static int has_pit_state2;
static int has_sregs2;
static int has_exception_payload;
static int has_triple_fault_event;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;

static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];

#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);
int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_vm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret & KVM_CLOCK_TSC_STABLE);
}

bool kvm_has_adjust_clock(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
}

bool kvm_has_exception_payload(void)
{
    return has_exception_payload;
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_accel());

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}
#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })
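
/*
 * Rough expansion sketch of MEMORIZE as used by kvm_enable_x2apic() below;
 * the statement expression early-returns the cached result from the
 * *enclosing* function on every call after the first:
 *
 *     static bool _memorized;
 *     if (_memorized) {
 *         return has_x2apic_api;                  // cached result
 *     }
 *     _memorized = true;
 *     has_x2apic_api = kvm_x2apic_api_set_flags(...);
 */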
static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}

bool kvm_hv_vpindex_settable(void)
{
    return hv_vpindex_settable;
}
static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t value;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    env->tsc_valid = !runstate_is_running();

    ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value);
    if (ret < 0) {
        return ret;
    }

    env->tsc = value;
    return 0;
}

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}
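
/*
 * Note on the growth strategy above: KVM reports -E2BIG without telling us
 * the required size, so try_get_cpuid() is simply retried with 'max'
 * doubling (1, 2, 4, 8, ...).  For N supported entries that is O(log N)
 * ioctl round-trips and a final allocation of fewer than 2 * N entries.
 */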
static bool host_tsx_broken(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpu_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}
/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}
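
/*
 * Typical combined use of the two helpers above, e.g. to read the
 * supported CPUID.7.0:EBX bits (illustrative sketch only):
 *
 *     struct kvm_cpuid_entry2 *e = cpuid_find_entry(cpuid, 7, 0);
 *     uint32_t ebx = e ? cpuid_entry_get_reg(e, R_EBX) : 0;
 */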
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    uint64_t bitmask;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }

        if (enable_cpu_pm) {
            int disable_exits = kvm_check_extension(s,
                                                    KVM_CAP_X86_DISABLE_EXITS);

            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
                ret |= CPUID_EXT_MONITOR;
            }
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        if (host_tsx_broken()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 7 && index == 0 && reg == R_EDX) {
        /*
         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
         * returned by KVM_GET_MSR_INDEX_LIST.
         */
        if (!has_msr_arch_capabs) {
            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
        }
    } else if (function == 0xd && index == 0 &&
               (reg == R_EAX || reg == R_EDX)) {
        /*
         * The value returned by KVM_GET_SUPPORTED_CPUID does not include
         * features that still have to be enabled with the arch_prctl
         * system call.  QEMU needs the full value, which is retrieved
         * with KVM_GET_DEVICE_ATTR.
         */
        struct kvm_device_attr attr = {
            .group = 0,
            .attr = KVM_X86_XCOMP_GUEST_SUPP,
            .addr = (unsigned long) &bitmask
        };

        bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES);
        if (!sys_attr) {
            return ret;
        }

        int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
        if (rc < 0) {
            if (rc != -ENXIO) {
                warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) "
                            "error: %d", rc);
            }
            return ret;
        }
        ret = (reg == R_EAX) ? bitmask : bitmask >> 32;
    } else if (function == 0x80000001 && reg == R_ECX) {
        /*
         * It's safe to enable TOPOEXT even if it's not returned by
         * GET_SUPPORTED_CPUID.  Unconditionally enabling TOPOEXT here allows
         * us to keep CPU models including TOPOEXT runnable on older kernels.
         */
        ret |= CPUID_EXT3_TOPOEXT;
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
        if (kvm_irqchip_is_split()) {
            ret |= 1U << KVM_FEATURE_MSI_EXT_DEST_ID;
        }
    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
        ret |= 1U << KVM_HINTS_REALTIME;
    }

    return ret;
}
uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {};
    uint64_t value;
    uint32_t ret, can_be_one, must_be_one;

    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
        return 0;
    }

    /* Check if requested MSR is supported feature MSR */
    int i;
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    }
    if (i == kvm_feature_msrs->nmsrs) {
        return 0; /* if the feature MSR is not supported, simply return 0 */
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = index;

    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
    if (ret != 1) {
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
                     index, strerror(-ret));
        exit(1);
    }

    value = msr_data.entries[0].data;
    switch (index) {
    case MSR_IA32_VMX_PROCBASED_CTLS2:
        if (!has_msr_vmx_procbased_ctls2) {
            /* KVM forgot to add these bits for some time, do this ourselves. */
            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
                CPUID_XSAVE_XSAVES) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
                CPUID_EXT_RDRAND) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_INVPCID) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
                CPUID_7_0_EBX_RDSEED) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
            }
            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
                CPUID_EXT2_RDTSCP) {
                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
            }
        }
        /* fall through */
    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
        /*
         * Return true for bits that can be one, but do not have to be one.
         * The SDM tells us which bits could have a "must be one" setting,
         * so we can do the opposite transformation in make_vmx_msr_value.
         */
        must_be_one = (uint32_t)value;
        can_be_one = (uint32_t)(value >> 32);
        return can_be_one & ~must_be_one;

    default:
        return value;
    }
}
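
/*
 * Worked example for the TRUE_*_CTLS transformation above (illustrative
 * numbers, not taken from the SDM or any real CPU): if KVM reports
 * value == 0x0000001e00000016, then
 *
 *     must_be_one = 0x16, can_be_one = 0x1e,
 *     can_be_one & ~must_be_one = 0x08
 *
 * i.e. only bit 3 is truly optional; bits 1, 2 and 4 are forced to one.
 */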
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}
static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_RIPV | MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}
static void emit_hypervisor_memory_failure(MemoryFailureAction action, bool ar)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = false};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_HYPERVISOR, action,
                                   &mff);
}

static void hardware_memory_error(void *host_addr)
{
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_FATAL, true);
    error_report("QEMU got Hardware memory error at addr %p", host_addr);
    exit(1);
}
void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running.  An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);

            /*
             * Use different logging severity based on error type.
             * If there is additional MCE reporting on the hypervisor, QEMU VA
             * could be another source to identify the PA and MCE details.
             */
            if (code == BUS_MCEERR_AR) {
                error_report("Guest MCE Memory Error at QEMU addr %p and "
                             "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                             addr, paddr, "BUS_MCEERR_AR");
            } else {
                warn_report("Guest MCE Memory Error at QEMU addr %p and "
                            "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
                            addr, paddr, "BUS_MCEERR_AO");
            }

            return;
        }

        if (code == BUS_MCEERR_AO) {
            warn_report("Hardware memory error at addr %p of type %s "
                        "for memory used by QEMU itself instead of guest system!",
                        addr, "BUS_MCEERR_AO");
        }
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error(addr);
    }

    /* Hope we are lucky for AO MCE, just notify an event */
    emit_hypervisor_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, false);
}
static void kvm_reset_exception(CPUX86State *env)
{
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
}

static void kvm_queue_exception(CPUX86State *env,
                                int32_t exception_nr,
                                uint8_t exception_has_payload,
                                uint64_t exception_payload)
{
    assert(env->exception_nr == -1);
    assert(!env->exception_pending);
    assert(!env->exception_injected);
    assert(!env->exception_has_payload);

    env->exception_nr = exception_nr;

    if (has_exception_payload) {
        env->exception_pending = 1;

        env->exception_has_payload = exception_has_payload;
        env->exception_payload = exception_payload;
    } else {
        env->exception_injected = 1;

        if (exception_nr == EXCP01_DB) {
            assert(exception_has_payload);
            env->dr[6] = exception_payload;
        } else if (exception_nr == EXCP0E_PAGE) {
            assert(exception_has_payload);
            env->cr[2] = exception_payload;
        } else {
            assert(!exception_has_payload);
        }
    }
}
static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        kvm_reset_exception(env);

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}
static void cpu_update_state(void *opaque, bool running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif

static bool hyperv_enabled(X86CPU *cpu)
{
    return kvm_check_extension(kvm_state, KVM_CAP_HYPERV) > 0 &&
        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_NOTIFY) ||
         cpu->hyperv_features || cpu->hyperv_passthrough);
}
/*
 * Check whether target_freq is within conservative
 * ntp correctable bounds (250ppm) of freq
 */
static inline bool freq_within_bounds(int freq, int target_freq)
{
    int max_freq = freq + (freq * 250 / 1000000);
    int min_freq = freq - (freq * 250 / 1000000);

    if (target_freq >= min_freq && target_freq <= max_freq) {
        return true;
    }

    return false;
}
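
/*
 * Worked example: for a 2 GHz host TSC, freq == 2000000 (kHz), the
 * tolerated slack is 2000000 * 250 / 1000000 = 500 kHz, so any
 * target_freq in [1999500, 2000500] kHz counts as NTP-correctable.
 */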
static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r, cur_freq;
    bool set_ioctl = false;

    if (!env->tsc_khz) {
        return 0;
    }

    cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
               kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP;

    /*
     * If TSC scaling is supported, attempt to set TSC frequency.
     */
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL)) {
        set_ioctl = true;
    }

    /*
     * If desired TSC frequency is within bounds of NTP correction,
     * attempt to set TSC frequency.
     */
    if (cur_freq != -ENOTSUP && freq_within_bounds(cur_freq, env->tsc_khz)) {
        set_ioctl = true;
    }

    r = set_ioctl ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;

    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                   kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                   -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            warn_report("TSC frequency mismatch between "
                        "VM (%" PRId64 " kHz) and host (%d kHz), "
                        "and TSC scaling unavailable",
                        env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}
static bool tsc_is_stable_and_known(CPUX86State *env)
{
    if (!env->tsc_khz) {
        return false;
    }
    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
        || env->user_tsc_khz;
}

#define DEFAULT_EVMCS_VERSION ((1 << 8) | 1)
static struct {
    const char *desc;
    struct {
        uint32_t func;
        int reg;
        uint32_t bits;
    } flags[2];
    uint64_t dependencies;
} kvm_hyperv_properties[] = {
    [HYPERV_FEAT_RELAXED] = {
        .desc = "relaxed timing (hv-relaxed)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_RELAXED_TIMING_RECOMMENDED}
        }
    },
    [HYPERV_FEAT_VAPIC] = {
        .desc = "virtual APIC (hv-vapic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_APIC_ACCESS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TIME] = {
        .desc = "clocksources (hv-time)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_CRASH] = {
        .desc = "crash MSRs (hv-crash)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RESET] = {
        .desc = "reset MSR (hv-reset)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_RESET_AVAILABLE}
        }
    },
    [HYPERV_FEAT_VPINDEX] = {
        .desc = "VP_INDEX MSR (hv-vpindex)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_INDEX_AVAILABLE}
        }
    },
    [HYPERV_FEAT_RUNTIME] = {
        .desc = "VP_RUNTIME MSR (hv-runtime)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_VP_RUNTIME_AVAILABLE}
        }
    },
    [HYPERV_FEAT_SYNIC] = {
        .desc = "synthetic interrupt controller (hv-synic)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNIC_AVAILABLE}
        }
    },
    [HYPERV_FEAT_STIMER] = {
        .desc = "synthetic timers (hv-stimer)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_SYNTIMERS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
    },
    [HYPERV_FEAT_FREQUENCIES] = {
        .desc = "frequency MSRs (hv-frequencies)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_FREQUENCY_MSRS},
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
        }
    },
    [HYPERV_FEAT_REENLIGHTENMENT] = {
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EAX,
             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
        }
    },
    [HYPERV_FEAT_TLBFLUSH] = {
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_EVMCS] = {
        .desc = "enlightened VMCS (hv-evmcs)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
    [HYPERV_FEAT_IPI] = {
        .desc = "paravirtualized IPI (hv-ipi)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_CLUSTER_IPI_RECOMMENDED |
             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
        },
        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
    },
    [HYPERV_FEAT_STIMER_DIRECT] = {
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_STIMER)
    },
    [HYPERV_FEAT_AVIC] = {
        .desc = "AVIC/APICv support (hv-avic/hv-apicv)",
        .flags = {
            {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX,
             .bits = HV_DEPRECATING_AEOI_RECOMMENDED}
        }
    },
#ifdef CONFIG_SYNDBG
    [HYPERV_FEAT_SYNDBG] = {
        .desc = "Enable synthetic kernel debugger channel (hv-syndbg)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED)
    },
#endif
    [HYPERV_FEAT_MSR_BITMAP] = {
        .desc = "enlightened MSR-Bitmap (hv-emsr-bitmap)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_MSR_BITMAP}
        }
    },
    [HYPERV_FEAT_XMM_INPUT] = {
        .desc = "XMM fast hypercall input (hv-xmm-input)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_HYPERCALL_XMM_INPUT_AVAILABLE}
        }
    },
    [HYPERV_FEAT_TLBFLUSH_EXT] = {
        .desc = "Extended gva ranges for TLB flush hypercalls (hv-tlbflush-ext)",
        .flags = {
            {.func = HV_CPUID_FEATURES, .reg = R_EDX,
             .bits = HV_EXT_GVA_RANGES_FLUSH_AVAILABLE}
        },
        .dependencies = BIT(HYPERV_FEAT_TLBFLUSH)
    },
    [HYPERV_FEAT_TLBFLUSH_DIRECT] = {
        .desc = "direct TLB flush (hv-tlbflush-direct)",
        .flags = {
            {.func = HV_CPUID_NESTED_FEATURES, .reg = R_EAX,
             .bits = HV_NESTED_DIRECT_FLUSH}
        },
        .dependencies = BIT(HYPERV_FEAT_VAPIC)
    },
};
static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max,
                                           bool do_sys_ioctl)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;

    if (do_sys_ioctl) {
        r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
    }
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
/*
 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
{
    struct kvm_cpuid2 *cpuid;
    /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */
    int max = 11;
    int i;
    bool do_sys_ioctl;

    do_sys_ioctl =
        kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID) > 0;

    /*
     * Non-empty KVM context is needed when KVM_CAP_SYS_HYPERV_CPUID is
     * unsupported, kvm_hyperv_expand_features() checks for that.
     */
    assert(do_sys_ioctl || cs->kvm_state);

    /*
     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
     * -E2BIG, however, it doesn't report back the right size. Keep increasing
     * it and re-trying until we succeed.
     */
    while ((cpuid = try_get_hv_cpuid(cs, max, do_sys_ioctl)) == NULL) {
        max++;
    }

    /*
     * KVM_GET_SUPPORTED_HV_CPUID does not set EVMCS CPUID bit before
     * KVM_CAP_HYPERV_ENLIGHTENED_VMCS is enabled but we want to get the
     * information early, just check for the capability and set the bit
     * manually.
     */
    if (!do_sys_ioctl && kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        for (i = 0; i < cpuid->nent; i++) {
            if (cpuid->entries[i].function == HV_CPUID_ENLIGHTMENT_INFO) {
                cpuid->entries[i].eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
            }
        }
    }

    return cpuid;
}
/*
 * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
 * leaves from KVM_CAP_HYPERV* and present MSRs data.
 */
static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid2 *cpuid;
    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;

    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
    cpuid->nent = 2;

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
    entry_feat = &cpuid->entries[0];
    entry_feat->function = HV_CPUID_FEATURES;

    entry_recomm = &cpuid->entries[1];
    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
    }

    if (has_msr_hv_frequencies) {
        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }

    if (has_msr_hv_crash) {
        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
    }

    if (has_msr_hv_reenlightenment) {
        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
    }

    if (has_msr_hv_reset) {
        entry_feat->eax |= HV_RESET_AVAILABLE;
    }

    if (has_msr_hv_vpindex) {
        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
    }

    if (has_msr_hv_runtime) {
        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
    }

    if (has_msr_hv_synic) {
        unsigned int cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;

        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
            entry_feat->eax |= HV_SYNIC_AVAILABLE;
        }
    }

    if (has_msr_hv_stimer) {
        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
    }

    if (has_msr_hv_syndbg_options) {
        entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE;
        entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
        entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
    }

    if (kvm_check_extension(cs->kvm_state,
                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
    }

    return cpuid;
}
static uint32_t hv_cpuid_get_host(CPUState *cs, uint32_t func, int reg)
{
    struct kvm_cpuid_entry2 *entry;
    struct kvm_cpuid2 *cpuid;

    if (hv_cpuid_cache) {
        cpuid = hv_cpuid_cache;
    } else {
        if (kvm_check_extension(kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
            cpuid = get_supported_hv_cpuid(cs);
        } else {
            /*
             * 'cs->kvm_state' may be NULL when Hyper-V features are expanded
             * before KVM context is created but this is only done when
             * KVM_CAP_SYS_HYPERV_CPUID is supported and it implies
             * KVM_CAP_HYPERV_CPUID.
             */
            assert(cs->kvm_state);

            cpuid = get_supported_hv_cpuid_legacy(cs);
        }
        hv_cpuid_cache = cpuid;
    }

    if (!cpuid) {
        return 0;
    }

    entry = cpuid_find_entry(cpuid, func, 0);
    if (!entry) {
        return 0;
    }

    return cpuid_entry_get_reg(entry, reg);
}
static bool hyperv_feature_supported(CPUState *cs, int feature)
{
    uint32_t func, bits;
    int i, reg;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {

        func = kvm_hyperv_properties[feature].flags[i].func;
        reg = kvm_hyperv_properties[feature].flags[i].reg;
        bits = kvm_hyperv_properties[feature].flags[i].bits;

        if (!func) {
            continue;
        }

        if ((hv_cpuid_get_host(cs, func, reg) & bits) != bits) {
            return false;
        }
    }

    return true;
}
/* Checks that all feature dependencies are enabled */
static bool hv_feature_check_deps(X86CPU *cpu, int feature, Error **errp)
{
    uint64_t deps;
    int dep_feat;

    deps = kvm_hyperv_properties[feature].dependencies;
    while (deps) {
        dep_feat = ctz64(deps);
        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
            error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                       kvm_hyperv_properties[feature].desc,
                       kvm_hyperv_properties[dep_feat].desc);
            return false;
        }
        deps &= ~(1ull << dep_feat);
    }

    return true;
}
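
/*
 * Dependency-walk sketch: 'deps' is a bitmap, so for HYPERV_FEAT_STIMER
 * (dependencies == BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)) the
 * loop above takes ctz64() of the remaining mask -- first SYNIC, then
 * TIME -- clearing each bit until the mask is empty or a missing
 * dependency is found.
 */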
static uint32_t hv_build_cpuid_leaf(CPUState *cs, uint32_t func, int reg)
{
    X86CPU *cpu = X86_CPU(cs);
    uint32_t r = 0;
    int i, j;

    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties); i++) {
        if (!hyperv_feat_enabled(cpu, i)) {
            continue;
        }

        for (j = 0; j < ARRAY_SIZE(kvm_hyperv_properties[i].flags); j++) {
            if (kvm_hyperv_properties[i].flags[j].func != func) {
                continue;
            }
            if (kvm_hyperv_properties[i].flags[j].reg != reg) {
                continue;
            }

            r |= kvm_hyperv_properties[i].flags[j].bits;
        }
    }

    /* HV_CPUID_NESTED_FEATURES.EAX also encodes the supported eVMCS range */
    if (func == HV_CPUID_NESTED_FEATURES && reg == R_EAX) {
        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
            r |= DEFAULT_EVMCS_VERSION;
        }
    }

    return r;
}
/*
 * Expand Hyper-V CPU features. In particular, check that all the requested
 * features are supported by the host and the sanity of the configuration
 * (that all the required dependencies are included). Also, this takes care
 * of 'hv_passthrough' mode and fills the environment with all supported
 * Hyper-V features.
 */
bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int feat;

    if (!hyperv_enabled(cpu)) {
        return true;
    }

    /*
     * When kvm_hyperv_expand_features is called at CPU feature expansion
     * time per-CPU kvm_state is not available yet so we can only proceed
     * when KVM_CAP_SYS_HYPERV_CPUID is supported.
     */
    if (!cs->kvm_state &&
        !kvm_check_extension(kvm_state, KVM_CAP_SYS_HYPERV_CPUID)) {
        return true;
    }

    if (cpu->hyperv_passthrough) {
        cpu->hyperv_vendor_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX);
        cpu->hyperv_vendor_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX);
        cpu->hyperv_vendor_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX);
        cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor,
                                       sizeof(cpu->hyperv_vendor_id) + 1);
        memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id,
               sizeof(cpu->hyperv_vendor_id));
        cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0;

        cpu->hyperv_interface_id[0] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EAX);
        cpu->hyperv_interface_id[1] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EBX);
        cpu->hyperv_interface_id[2] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_ECX);
        cpu->hyperv_interface_id[3] =
            hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX);

        cpu->hyperv_ver_id_build =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX);
        cpu->hyperv_ver_id_major =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16;
        cpu->hyperv_ver_id_minor =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff;
        cpu->hyperv_ver_id_sp =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX);
        cpu->hyperv_ver_id_sb =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24;
        cpu->hyperv_ver_id_sn =
            hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff;

        cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS,
                                            R_EAX);
        cpu->hyperv_limits[0] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EBX);
        cpu->hyperv_limits[1] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_ECX);
        cpu->hyperv_limits[2] =
            hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EDX);

        cpu->hyperv_spinlock_attempts =
            hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EBX);

        /*
         * Mark feature as enabled in 'cpu->hyperv_features' as
         * hv_build_cpuid_leaf() uses this info to build guest CPUIDs.
         */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            if (hyperv_feature_supported(cs, feat)) {
                cpu->hyperv_features |= BIT(feat);
            }
        }
    } else {
        /* Check features availability and dependencies */
        for (feat = 0; feat < ARRAY_SIZE(kvm_hyperv_properties); feat++) {
            /* If the feature was not requested skip it. */
            if (!hyperv_feat_enabled(cpu, feat)) {
                continue;
            }

            /* Check if the feature is supported by KVM */
            if (!hyperv_feature_supported(cs, feat)) {
                error_setg(errp, "Hyper-V %s is not supported by kernel",
                           kvm_hyperv_properties[feat].desc);
                return false;
            }

            /* Check dependencies */
            if (!hv_feature_check_deps(cpu, feat, &local_err)) {
                error_propagate(errp, local_err);
                return false;
            }
        }
    }

    /* Additional dependencies not covered by kvm_hyperv_properties[] */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
        error_setg(errp, "Hyper-V %s requires Hyper-V %s",
                   kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
                   kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
        return false;
    }

    return true;
}
/*
 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent.
 */
static int hyperv_fill_cpuids(CPUState *cs,
                              struct kvm_cpuid_entry2 *cpuid_ent)
{
    X86CPU *cpu = X86_CPU(cs);
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    uint32_t cpuid_i = 0, max_cpuid_leaf = 0;
    uint32_t nested_eax =
        hv_build_cpuid_leaf(cs, HV_CPUID_NESTED_FEATURES, R_EAX);

    max_cpuid_leaf = nested_eax ? HV_CPUID_NESTED_FEATURES :
                     HV_CPUID_IMPLEMENT_LIMITS;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        max_cpuid_leaf =
            MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
    c->eax = max_cpuid_leaf;
    c->ebx = cpu->hyperv_vendor_id[0];
    c->ecx = cpu->hyperv_vendor_id[1];
    c->edx = cpu->hyperv_vendor_id[2];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_INTERFACE;
    c->eax = cpu->hyperv_interface_id[0];
    c->ebx = cpu->hyperv_interface_id[1];
    c->ecx = cpu->hyperv_interface_id[2];
    c->edx = cpu->hyperv_interface_id[3];

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_VERSION;
    c->eax = cpu->hyperv_ver_id_build;
    c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 |
        cpu->hyperv_ver_id_minor;
    c->ecx = cpu->hyperv_ver_id_sp;
    c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 |
        (cpu->hyperv_ver_id_sn & 0xffffff);

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_FEATURES;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EAX);
    c->ebx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EBX);
    c->edx = hv_build_cpuid_leaf(cs, HV_CPUID_FEATURES, R_EDX);

    /* Unconditionally required with any Hyper-V enlightenment */
    c->eax |= HV_HYPERCALL_AVAILABLE;

    /* SynIC and Vmbus devices require messages/signals hypercalls */
    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
        !cpu->hyperv_synic_kvm_only) {
        c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS;
    }

    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
    c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_ENLIGHTMENT_INFO;
    c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX);
    c->ebx = cpu->hyperv_spinlock_attempts;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) &&
        !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) {
        c->eax |= HV_APIC_ACCESS_RECOMMENDED;
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
        c->eax |= HV_NO_NONARCH_CORESHARING;
    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
        c->eax |= hv_cpuid_get_host(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) &
            HV_NO_NONARCH_CORESHARING;
    }

    c = &cpuid_ent[cpuid_i++];
    c->function = HV_CPUID_IMPLEMENT_LIMITS;
    c->eax = cpu->hv_max_vps;
    c->ebx = cpu->hyperv_limits[0];
    c->ecx = cpu->hyperv_limits[1];
    c->edx = cpu->hyperv_limits[2];

    if (nested_eax) {
        uint32_t function;

        /* Create zeroed 0x40000006..0x40000009 leaves */
        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
             function < HV_CPUID_NESTED_FEATURES; function++) {
            c = &cpuid_ent[cpuid_i++];
            c->function = function;
        }

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_NESTED_FEATURES;
        c->eax = nested_eax;
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) {
        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS;
        c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
            HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
        memcpy(signature, "Microsoft VS", 12);
        c->eax = 0;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_INTERFACE;
        memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_ent[cpuid_i++];
        c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
        c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;
    }

    return cpuid_i;
}
static Error *hv_passthrough_mig_blocker;
static Error *hv_no_nonarch_cs_mig_blocker;

/* Checks that the exposed eVMCS version range is supported by KVM */
static bool evmcs_version_supported(uint16_t evmcs_version,
                                    uint16_t supported_evmcs_version)
{
    uint8_t min_version = evmcs_version & 0xff;
    uint8_t max_version = evmcs_version >> 8;
    uint8_t min_supported_version = supported_evmcs_version & 0xff;
    uint8_t max_supported_version = supported_evmcs_version >> 8;

    return (min_version >= min_supported_version) &&
           (max_version <= max_supported_version);
}
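
/*
 * Example of the packed format checked above: DEFAULT_EVMCS_VERSION is
 * ((1 << 8) | 1), i.e. min == 1 in the low byte and max == 1 in the high
 * byte.  A kernel advertising e.g. 0x0201 (range [1..2]) would accept it,
 * while one advertising 0x0202 (range [2..2]) would not.
 */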
static int hyperv_init_vcpu(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    Error *local_err = NULL;
    int ret;

    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
        error_setg(&hv_passthrough_mig_blocker,
                   "'hv-passthrough' CPU flag prevents migration, use explicit"
                   " set of hv-* flags instead");
        ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
        hv_no_nonarch_cs_mig_blocker == NULL) {
        error_setg(&hv_no_nonarch_cs_mig_blocker,
                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
                   " make sure SMT is disabled and/or that vCPUs are properly"
                   " pinned)");
        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
        /*
         * the kernel doesn't support setting vp_index; assert that its value
         * is in sync
         */
        uint64_t value;

        ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value);
        if (ret < 0) {
            return ret;
        }

        if (value != hyperv_vp_index(CPU(cpu))) {
            error_report("kernel's vp_index != QEMU's vp_index");
            return -ENXIO;
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
        if (ret < 0) {
            error_report("failed to turn on HyperV SynIC in KVM: %s",
                         strerror(-ret));
            return ret;
        }

        if (!cpu->hyperv_synic_kvm_only) {
            ret = hyperv_x86_synic_add(cpu);
            if (ret < 0) {
                error_report("failed to create HyperV SynIC: %s",
                             strerror(-ret));
                return ret;
            }
        }
    }

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
        uint16_t evmcs_version = DEFAULT_EVMCS_VERSION;
        uint16_t supported_evmcs_version;

        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
                                  (uintptr_t)&supported_evmcs_version);

        /*
         * KVM is required to support EVMCS ver.1. as that's what 'hv-evmcs'
         * option sets. Note: we hardcode the maximum supported eVMCS version
         * to '1' as well so 'hv-evmcs' feature is migratable even when (and if)
         * ver.2 is implemented. A new option (e.g. 'hv-evmcs=2') will then have
         * to be added.
         */
        if (ret < 0) {
            error_report("Hyper-V %s is not supported by kernel",
                         kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
            return ret;
        }

        if (!evmcs_version_supported(evmcs_version, supported_evmcs_version)) {
            error_report("eVMCS version range [%d..%d] is not supported by "
                         "kernel (supported: [%d..%d])", evmcs_version & 0xff,
                         evmcs_version >> 8, supported_evmcs_version & 0xff,
                         supported_evmcs_version >> 8);
            return -ENOTSUP;
        }
    }

    if (cpu->hyperv_enforce_cpuid) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1);
        if (ret < 0) {
            error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s",
                         strerror(-ret));
            return ret;
        }
    }

    return 0;
}
static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100

static void kvm_init_xsave(CPUX86State *env)
{
    if (has_xsave2) {
        env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096);
    } else if (has_xsave) {
        env->xsave_buf_len = sizeof(struct kvm_xsave);
    } else {
        return;
    }

    env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
    memset(env->xsave_buf, 0, env->xsave_buf_len);
    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <=
           env->xsave_buf_len);
}
static void kvm_init_nested_state(CPUX86State *env)
{
    struct kvm_vmx_nested_state_hdr *vmx_hdr;
    uint32_t size;

    if (!env->nested_state) {
        return;
    }

    size = env->nested_state->size;

    memset(env->nested_state, 0, size);
    env->nested_state->size = size;

    if (cpu_has_vmx(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
        vmx_hdr = &env->nested_state->hdr.vmx;
        vmx_hdr->vmxon_pa = -1ull;
        vmx_hdr->vmcs12_pa = -1ull;
    } else if (cpu_has_svm(env)) {
        env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM;
    }
}
1726 int kvm_arch_init_vcpu(CPUState *cs)
1728 struct {
1729 struct kvm_cpuid2 cpuid;
1730 struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
1731 } cpuid_data;
1733 * The kernel defines these structs with padding fields so there
1734 * should be no extra padding in our cpuid_data struct.
1736 QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
1737 sizeof(struct kvm_cpuid2) +
1738 sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
1740 X86CPU *cpu = X86_CPU(cs);
1741 CPUX86State *env = &cpu->env;
1742 uint32_t limit, i, j, cpuid_i;
1743 uint32_t unused;
1744 struct kvm_cpuid_entry2 *c;
1745 uint32_t signature[3];
1746 int kvm_base = KVM_CPUID_SIGNATURE;
1747 int max_nested_state_len;
1748 int r;
1749 Error *local_err = NULL;
1751 memset(&cpuid_data, 0, sizeof(cpuid_data));
1753 cpuid_i = 0;
1755 has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2);
1757 r = kvm_arch_set_tsc_khz(cs);
1758 if (r < 0) {
1759 return r;
1762 /* vcpu's TSC frequency is either specified by user, or following
1763 * the value used by KVM if the former is not present. In the
1764 * latter case, we query it from KVM and record in env->tsc_khz,
1765 * so that vcpu's TSC frequency can be migrated later via this field.
1767 if (!env->tsc_khz) {
1768 r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
1769 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
1770 -ENOTSUP;
1771 if (r > 0) {
1772 env->tsc_khz = r;
1776 env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY;
1779 * kvm_hyperv_expand_features() is called here for the second time in case
1780 * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle
1781 * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to
1782 * check which Hyper-V enlightenments are supported and which are not, we
1783 * can still proceed and check/expand Hyper-V enlightenments here so legacy
1784 * behavior is preserved.
1786 if (!kvm_hyperv_expand_features(cpu, &local_err)) {
1787 error_report_err(local_err);
1788 return -ENOSYS;
1791 if (hyperv_enabled(cpu)) {
1792 r = hyperv_init_vcpu(cpu);
1793 if (r) {
1794 return r;
1797 cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries);
1798 kvm_base = KVM_CPUID_SIGNATURE_NEXT;
1799 has_msr_hv_hypercall = true;
1802 if (cpu->expose_kvm) {
1803 memcpy(signature, "KVMKVMKVM\0\0\0", 12);
1804 c = &cpuid_data.entries[cpuid_i++];
1805 c->function = KVM_CPUID_SIGNATURE | kvm_base;
1806 c->eax = KVM_CPUID_FEATURES | kvm_base;
1807 c->ebx = signature[0];
1808 c->ecx = signature[1];
1809 c->edx = signature[2];
1811 c = &cpuid_data.entries[cpuid_i++];
1812 c->function = KVM_CPUID_FEATURES | kvm_base;
1813 c->eax = env->features[FEAT_KVM];
1814 c->edx = env->features[FEAT_KVM_HINTS];
1817 cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
1819 if (cpu->kvm_pv_enforce_cpuid) {
1820 r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
1821 if (r < 0) {
1822 fprintf(stderr,
1823 "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s",
1824 strerror(-r));
1825 abort();
1829 for (i = 0; i <= limit; i++) {
1830 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1831 fprintf(stderr, "unsupported level value: 0x%x\n", limit);
1832 abort();
1834 c = &cpuid_data.entries[cpuid_i++];
1836 switch (i) {
1837 case 2: {
1838 /* Keep reading function 2 till all the input is received */
1839 int times;
1841 c->function = i;
1842 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
1843 KVM_CPUID_FLAG_STATE_READ_NEXT;
1844 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1845 times = c->eax & 0xff;
1847 for (j = 1; j < times; ++j) {
1848 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1849 fprintf(stderr, "cpuid_data is full, no space for "
1850 "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
1851 abort();
1853 c = &cpuid_data.entries[cpuid_i++];
1854 c->function = i;
1855 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
1856 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1858 break;
1860 case 0x1f:
1861 if (env->nr_dies < 2) {
1862 break;
1864 /* fallthrough */
1865 case 4:
1866 case 0xb:
1867 case 0xd:
1868 for (j = 0; ; j++) {
1869 if (i == 0xd && j == 64) {
1870 break;
1873 if (i == 0x1f && j == 64) {
1874 break;
1877 c->function = i;
1878 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1879 c->index = j;
1880 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1882 if (i == 4 && c->eax == 0) {
1883 break;
1885 if (i == 0xb && !(c->ecx & 0xff00)) {
1886 break;
1888 if (i == 0x1f && !(c->ecx & 0xff00)) {
1889 break;
1891 if (i == 0xd && c->eax == 0) {
1892 continue;
1894 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1895 fprintf(stderr, "cpuid_data is full, no space for "
1896 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1897 abort();
1899 c = &cpuid_data.entries[cpuid_i++];
1901 break;
1902 case 0x7:
1903 case 0x12:
1904 for (j = 0; ; j++) {
1905 c->function = i;
1906 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1907 c->index = j;
1908 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1910 if (j > 1 && (c->eax & 0xf) != 1) {
1911 break;
1914 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1915 fprintf(stderr, "cpuid_data is full, no space for "
1916 "cpuid(eax:0x12,ecx:0x%x)\n", j);
1917 abort();
1919 c = &cpuid_data.entries[cpuid_i++];
1921 break;
1922 case 0x14:
1923 case 0x1d:
1924 case 0x1e: {
1925 uint32_t times;
1927 c->function = i;
1928 c->index = 0;
1929 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1930 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1931 times = c->eax;
1933 for (j = 1; j <= times; ++j) {
1934 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1935 fprintf(stderr, "cpuid_data is full, no space for "
1936 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1937 abort();
1939 c = &cpuid_data.entries[cpuid_i++];
1940 c->function = i;
1941 c->index = j;
1942 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1943 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1945 break;
1947 default:
1948 c->function = i;
1949 c->flags = 0;
1950 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1951 if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
1953 * KVM already returns all zeroes if a CPUID entry is missing,
1954 * so we can omit it and avoid hitting KVM's 80-entry limit.
1956 cpuid_i--;
1958 break;
1962 if (limit >= 0x0a) {
1963 uint32_t eax, edx;
1965 cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
1967 has_architectural_pmu_version = eax & 0xff;
1968 if (has_architectural_pmu_version > 0) {
1969 num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
1971 /* Shouldn't be more than 32, since that's the number of bits
1972 * available in EBX to tell us _which_ counters are available.
1973 * Play it safe.
1975 if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
1976 num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
1979 if (has_architectural_pmu_version > 1) {
1980 num_architectural_pmu_fixed_counters = edx & 0x1f;
1982 if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
1983 num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
1989 cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
1991 for (i = 0x80000000; i <= limit; i++) {
1992 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1993 fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
1994 abort();
1996 c = &cpuid_data.entries[cpuid_i++];
1998 switch (i) {
1999 case 0x8000001d:
2000 /* Query for all AMD cache information leaves */
2001 for (j = 0; ; j++) {
2002 c->function = i;
2003 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2004 c->index = j;
2005 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
2007 if (c->eax == 0) {
2008 break;
2010 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2011 fprintf(stderr, "cpuid_data is full, no space for "
2012 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
2013 abort();
2015 c = &cpuid_data.entries[cpuid_i++];
2017 break;
2018 default:
2019 c->function = i;
2020 c->flags = 0;
2021 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2022 if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
2024 * KVM already returns all zeroes if a CPUID entry is missing,
2025 * so we can omit it and avoid hitting KVM's 80-entry limit.
2027 cpuid_i--;
2029 break;
2033 /* Call Centaur's CPUID instructions they are supported. */
2034 if (env->cpuid_xlevel2 > 0) {
2035 cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
2037 for (i = 0xC0000000; i <= limit; i++) {
2038 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
2039 fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
2040 abort();
2042 c = &cpuid_data.entries[cpuid_i++];
2044 c->function = i;
2045 c->flags = 0;
2046 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
2050 cpuid_data.cpuid.nent = cpuid_i;
2052 if (((env->cpuid_version >> 8)&0xF) >= 6
2053 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2054 (CPUID_MCE | CPUID_MCA)
2055 && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
2056 uint64_t mcg_cap, unsupported_caps;
2057 int banks;
2058 int ret;
2060 ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
2061 if (ret < 0) {
2062 fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
2063 return ret;
2066 if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
2067 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
2068 (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
2069 return -ENOTSUP;
2072 unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
2073 if (unsupported_caps) {
2074 if (unsupported_caps & MCG_LMCE_P) {
2075 error_report("kvm: LMCE not supported");
2076 return -ENOTSUP;
2078 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
2079 unsupported_caps);
2082 env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
2083 ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
2084 if (ret < 0) {
2085 fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
2086 return ret;
2090 cpu->vmsentry = qemu_add_vm_change_state_handler(cpu_update_state, env);
2092 c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
2093 if (c) {
2094 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
2095 !!(c->ecx & CPUID_EXT_SMX);
2098 c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0);
2099 if (c && (c->ebx & CPUID_7_0_EBX_SGX)) {
2100 has_msr_feature_control = true;
2103 if (env->mcg_cap & MCG_LMCE_P) {
2104 has_msr_mcg_ext_ctl = has_msr_feature_control = true;
2107 if (!env->user_tsc_khz) {
2108 if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
2109 invtsc_mig_blocker == NULL) {
2110 error_setg(&invtsc_mig_blocker,
2111 "State blocked by non-migratable CPU device"
2112 " (invtsc flag)");
2113 r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
2114 if (r < 0) {
2115 error_report_err(local_err);
2116 return r;
2121 if (cpu->vmware_cpuid_freq
2122 /* Guests depend on 0x40000000 to detect this feature, so only expose
2123 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
2124 && cpu->expose_kvm
2125 && kvm_base == KVM_CPUID_SIGNATURE
2126 /* TSC clock must be stable and known for this feature. */
2127 && tsc_is_stable_and_known(env)) {
2129 c = &cpuid_data.entries[cpuid_i++];
2130 c->function = KVM_CPUID_SIGNATURE | 0x10;
2131 c->eax = env->tsc_khz;
2132 c->ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
2133 c->ecx = c->edx = 0;
2135 c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
2136 c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
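/*
 * Layout recap for the leaf built above (values illustrative, not
 * from the source): with env->tsc_khz == 2995200 and
 * env->apic_bus_freq == 1000000000 Hz, the guest reads
 * CPUID.40000010H with EAX == 2995200 (TSC frequency in kHz) and
 * EBX == 1000000 (APIC bus frequency in kHz).
 */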
2139 cpuid_data.cpuid.nent = cpuid_i;
2141 cpuid_data.cpuid.padding = 0;
2142 r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
2143 if (r) {
2144 goto fail;
2146 kvm_init_xsave(env);
2148 max_nested_state_len = kvm_max_nested_state_length();
2149 if (max_nested_state_len > 0) {
2150 assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
2152 if (cpu_has_vmx(env) || cpu_has_svm(env)) {
2153 env->nested_state = g_malloc0(max_nested_state_len);
2154 env->nested_state->size = max_nested_state_len;
2156 kvm_init_nested_state(env);
2160 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
2162 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
2163 has_msr_tsc_aux = false;
2166 kvm_init_msrs(cpu);
2168 return 0;
2170 fail:
2171 migrate_del_blocker(invtsc_mig_blocker);
2173 return r;
2176 int kvm_arch_destroy_vcpu(CPUState *cs)
2178 X86CPU *cpu = X86_CPU(cs);
2179 CPUX86State *env = &cpu->env;
2181 g_free(env->xsave_buf);
2183 g_free(cpu->kvm_msr_buf);
2184 cpu->kvm_msr_buf = NULL;
2186 g_free(env->nested_state);
2187 env->nested_state = NULL;
2189 qemu_del_vm_change_state_handler(cpu->vmsentry);
2191 return 0;
2194 void kvm_arch_reset_vcpu(X86CPU *cpu)
2196 CPUX86State *env = &cpu->env;
2198 env->xcr0 = 1;
2199 if (kvm_irqchip_in_kernel()) {
2200 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
2201 KVM_MP_STATE_UNINITIALIZED;
2202 } else {
2203 env->mp_state = KVM_MP_STATE_RUNNABLE;
2206 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
2207 int i;
2208 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
2209 env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
2212 hyperv_x86_synic_reset(cpu);
2214 /* enabled by default */
2215 env->poll_control_msr = 1;
2217 kvm_init_nested_state(env);
2219 sev_es_set_reset_vector(CPU(cpu));
2222 void kvm_arch_do_init_vcpu(X86CPU *cpu)
2224 CPUX86State *env = &cpu->env;
2226 /* APs get directly into wait-for-SIPI state. */
2227 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
2228 env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
2232 static int kvm_get_supported_feature_msrs(KVMState *s)
2234 int ret = 0;
2236 if (kvm_feature_msrs != NULL) {
2237 return 0;
2240 if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
2241 return 0;
2244 struct kvm_msr_list msr_list;
2246 msr_list.nmsrs = 0;
2247 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
2248 if (ret < 0 && ret != -E2BIG) {
2249 error_report("Fetch KVM feature MSR list failed: %s",
2250 strerror(-ret));
2251 return ret;
2254 assert(msr_list.nmsrs > 0);
2255 kvm_feature_msrs = (struct kvm_msr_list *) \
2256 g_malloc0(sizeof(msr_list) +
2257 msr_list.nmsrs * sizeof(msr_list.indices[0]));
2259 kvm_feature_msrs->nmsrs = msr_list.nmsrs;
2260 ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);
2262 if (ret < 0) {
2263 error_report("Fetch KVM feature MSR list failed: %s",
2264 strerror(-ret));
2265 g_free(kvm_feature_msrs);
2266 kvm_feature_msrs = NULL;
2267 return ret;
2270 return 0;
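/*
 * The "probe with nmsrs == 0, then re-issue" sequence above is the
 * standard way to size KVM list ioctls: the first call fails with
 * -E2BIG but fills in the required count. A condensed, hypothetical
 * wrapper for the same idea (not used by this file; error handling
 * reduced to NULL returns):
 */
static struct kvm_msr_list *fetch_msr_list(KVMState *s, int type)
{
    struct kvm_msr_list probe = { .nmsrs = 0 };
    struct kvm_msr_list *list;
    int ret;

    ret = kvm_ioctl(s, type, &probe); /* expected to return -E2BIG */
    if (ret < 0 && ret != -E2BIG) {
        return NULL;
    }
    list = g_malloc0(sizeof(*list) +
                     probe.nmsrs * sizeof(list->indices[0]));
    list->nmsrs = probe.nmsrs;
    if (kvm_ioctl(s, type, list) < 0) { /* second call fetches the data */
        g_free(list);
        return NULL;
    }
    return list;
}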
2273 static int kvm_get_supported_msrs(KVMState *s)
2275 int ret = 0;
2276 struct kvm_msr_list msr_list, *kvm_msr_list;
2279 * Obtain MSR list from KVM. These are the MSRs that we must
2280 * save/restore.
2282 msr_list.nmsrs = 0;
2283 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
2284 if (ret < 0 && ret != -E2BIG) {
2285 return ret;
2288 * Old kernel modules had a bug and could write beyond the provided
2289 * memory. Allocate at least a safe amount of 1K.
2291 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
2292 msr_list.nmsrs *
2293 sizeof(msr_list.indices[0])));
2295 kvm_msr_list->nmsrs = msr_list.nmsrs;
2296 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
2297 if (ret >= 0) {
2298 int i;
2300 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
2301 switch (kvm_msr_list->indices[i]) {
2302 case MSR_STAR:
2303 has_msr_star = true;
2304 break;
2305 case MSR_VM_HSAVE_PA:
2306 has_msr_hsave_pa = true;
2307 break;
2308 case MSR_TSC_AUX:
2309 has_msr_tsc_aux = true;
2310 break;
2311 case MSR_TSC_ADJUST:
2312 has_msr_tsc_adjust = true;
2313 break;
2314 case MSR_IA32_TSCDEADLINE:
2315 has_msr_tsc_deadline = true;
2316 break;
2317 case MSR_IA32_SMBASE:
2318 has_msr_smbase = true;
2319 break;
2320 case MSR_SMI_COUNT:
2321 has_msr_smi_count = true;
2322 break;
2323 case MSR_IA32_MISC_ENABLE:
2324 has_msr_misc_enable = true;
2325 break;
2326 case MSR_IA32_BNDCFGS:
2327 has_msr_bndcfgs = true;
2328 break;
2329 case MSR_IA32_XSS:
2330 has_msr_xss = true;
2331 break;
2332 case MSR_IA32_UMWAIT_CONTROL:
2333 has_msr_umwait = true;
2334 break;
2335 case HV_X64_MSR_CRASH_CTL:
2336 has_msr_hv_crash = true;
2337 break;
2338 case HV_X64_MSR_RESET:
2339 has_msr_hv_reset = true;
2340 break;
2341 case HV_X64_MSR_VP_INDEX:
2342 has_msr_hv_vpindex = true;
2343 break;
2344 case HV_X64_MSR_VP_RUNTIME:
2345 has_msr_hv_runtime = true;
2346 break;
2347 case HV_X64_MSR_SCONTROL:
2348 has_msr_hv_synic = true;
2349 break;
2350 case HV_X64_MSR_STIMER0_CONFIG:
2351 has_msr_hv_stimer = true;
2352 break;
2353 case HV_X64_MSR_TSC_FREQUENCY:
2354 has_msr_hv_frequencies = true;
2355 break;
2356 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
2357 has_msr_hv_reenlightenment = true;
2358 break;
2359 case HV_X64_MSR_SYNDBG_OPTIONS:
2360 has_msr_hv_syndbg_options = true;
2361 break;
2362 case MSR_IA32_SPEC_CTRL:
2363 has_msr_spec_ctrl = true;
2364 break;
2365 case MSR_AMD64_TSC_RATIO:
2366 has_tsc_scale_msr = true;
2367 break;
2368 case MSR_IA32_TSX_CTRL:
2369 has_msr_tsx_ctrl = true;
2370 break;
2371 case MSR_VIRT_SSBD:
2372 has_msr_virt_ssbd = true;
2373 break;
2374 case MSR_IA32_ARCH_CAPABILITIES:
2375 has_msr_arch_capabs = true;
2376 break;
2377 case MSR_IA32_CORE_CAPABILITY:
2378 has_msr_core_capabs = true;
2379 break;
2380 case MSR_IA32_PERF_CAPABILITIES:
2381 has_msr_perf_capabs = true;
2382 break;
2383 case MSR_IA32_VMX_VMFUNC:
2384 has_msr_vmx_vmfunc = true;
2385 break;
2386 case MSR_IA32_UCODE_REV:
2387 has_msr_ucode_rev = true;
2388 break;
2389 case MSR_IA32_VMX_PROCBASED_CTLS2:
2390 has_msr_vmx_procbased_ctls2 = true;
2391 break;
2392 case MSR_IA32_PKRS:
2393 has_msr_pkrs = true;
2394 break;
2399 g_free(kvm_msr_list);
2401 return ret;
2404 static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr,
2405 uint64_t *val)
2407 CPUState *cs = CPU(cpu);
2409 *val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
2410 *val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */
2412 return true;
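/*
 * A minimal decoding sketch for the value built above, assuming only
 * that bit layout (thread count in bits 15..0, core count in bits
 * 31..16); the helper is illustrative and not called in this file.
 * For a 4-core, 2-threads-per-core topology the guest reads
 * 0x00040008: 8 threads total, 4 cores.
 */
static inline void decode_core_thread_count(uint64_t val,
                                            unsigned *threads,
                                            unsigned *cores)
{
    *threads = val & 0xffff;       /* total thread count */
    *cores = (val >> 16) & 0xffff; /* core count */
}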
2415 static Notifier smram_machine_done;
2416 static KVMMemoryListener smram_listener;
2417 static AddressSpace smram_address_space;
2418 static MemoryRegion smram_as_root;
2419 static MemoryRegion smram_as_mem;
2421 static void register_smram_listener(Notifier *n, void *unused)
2423 MemoryRegion *smram =
2424 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2426 /* Outer container... */
2427 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
2428 memory_region_set_enabled(&smram_as_root, true);
2430 /* ... with two regions inside: normal system memory with low
2431 * priority, and...
2433 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
2434 get_system_memory(), 0, ~0ull);
2435 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
2436 memory_region_set_enabled(&smram_as_mem, true);
2438 if (smram) {
2439 /* ... SMRAM with higher priority */
2440 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
2441 memory_region_set_enabled(smram, true);
2444 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
2445 kvm_memory_listener_register(kvm_state, &smram_listener,
2446 &smram_address_space, 1, "kvm-smram");
2449 int kvm_arch_init(MachineState *ms, KVMState *s)
2451 uint64_t identity_base = 0xfffbc000;
2452 uint64_t shadow_mem;
2453 int ret;
2454 struct utsname utsname;
2455 Error *local_err = NULL;
2458 * Initialize SEV context, if required
2460 * If no memory encryption is requested (ms->cgs == NULL) this is
2461 * a no-op.
2463 * It's also a no-op if a non-SEV confidential guest support
2464 * mechanism is selected. SEV is the only mechanism available to
2465 * select on x86 at present, so this doesn't arise, but if new
2466 * mechanisms are supported in future (e.g. TDX), they'll need
2467 * their own initialization either here or elsewhere.
2469 ret = sev_kvm_init(ms->cgs, &local_err);
2470 if (ret < 0) {
2471 error_report_err(local_err);
2472 return ret;
2475 if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
2476 error_report("kvm: KVM_CAP_IRQ_ROUTING not supported by KVM");
2477 return -ENOTSUP;
2480 has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
2481 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
2482 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
2483 has_sregs2 = kvm_check_extension(s, KVM_CAP_SREGS2) > 0;
2485 hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
2487 has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
2488 if (has_exception_payload) {
2489 ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
2490 if (ret < 0) {
2491 error_report("kvm: Failed to enable exception payload cap: %s",
2492 strerror(-ret));
2493 return ret;
2497 has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT);
2498 if (has_triple_fault_event) {
2499 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true);
2500 if (ret < 0) {
2501 error_report("kvm: Failed to enable triple fault event cap: %s",
2502 strerror(-ret));
2503 return ret;
2507 ret = kvm_get_supported_msrs(s);
2508 if (ret < 0) {
2509 return ret;
2512 kvm_get_supported_feature_msrs(s);
2514 uname(&utsname);
2515 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
2518 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
2519 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
2520 * Since these must be part of guest physical memory, we need to allocate
2521 * them, both by setting their start addresses in the kernel and by
2522 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
2524 * Older KVM versions may not support setting the identity map base. In
2525 * that case we need to stick with the default, i.e. a 256K maximum BIOS
2526 * size.
2528 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
2529 /* Allows up to 16M BIOSes. */
2530 identity_base = 0xfeffc000;
2532 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
2533 if (ret < 0) {
2534 return ret;
2538 /* Set TSS base one page after EPT identity map. */
2539 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
2540 if (ret < 0) {
2541 return ret;
2544 /* Tell fw_cfg to notify the BIOS to reserve the range. */
2545 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
2546 if (ret < 0) {
2547 fprintf(stderr, "e820_add_entry() table is full\n");
2548 return ret;
2551 shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
2552 if (shadow_mem != -1) {
2553 shadow_mem /= 4096;
2554 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
2555 if (ret < 0) {
2556 return ret;
2560 if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
2561 object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
2562 x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
2563 smram_machine_done.notify = register_smram_listener;
2564 qemu_add_machine_init_done_notifier(&smram_machine_done);
2567 if (enable_cpu_pm) {
2568 int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
2569 int ret;
2571 /* Work around for kernel header with a typo. TODO: fix header and drop. */
2572 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
2573 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
2574 #endif
2575 if (disable_exits) {
2576 disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
2577 KVM_X86_DISABLE_EXITS_HLT |
2578 KVM_X86_DISABLE_EXITS_PAUSE |
2579 KVM_X86_DISABLE_EXITS_CSTATE);
2582 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
2583 disable_exits);
2584 if (ret < 0) {
2585 error_report("kvm: guest stopping CPU not supported: %s",
2586 strerror(-ret));
2590 if (object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) {
2591 X86MachineState *x86ms = X86_MACHINE(ms);
2593 if (x86ms->bus_lock_ratelimit > 0) {
2594 ret = kvm_check_extension(s, KVM_CAP_X86_BUS_LOCK_EXIT);
2595 if (!(ret & KVM_BUS_LOCK_DETECTION_EXIT)) {
2596 error_report("kvm: bus lock detection unsupported");
2597 return -ENOTSUP;
2599 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_BUS_LOCK_EXIT, 0,
2600 KVM_BUS_LOCK_DETECTION_EXIT);
2601 if (ret < 0) {
2602 error_report("kvm: Failed to enable bus lock detection cap: %s",
2603 strerror(-ret));
2604 return ret;
2606 ratelimit_init(&bus_lock_ratelimit_ctrl);
2607 ratelimit_set_speed(&bus_lock_ratelimit_ctrl,
2608 x86ms->bus_lock_ratelimit, BUS_LOCK_SLICE_TIME);
2612 if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE &&
2613 kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) {
2614 uint64_t notify_window_flags =
2615 ((uint64_t)s->notify_window << 32) |
2616 KVM_X86_NOTIFY_VMEXIT_ENABLED |
2617 KVM_X86_NOTIFY_VMEXIT_USER;
2618 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0,
2619 notify_window_flags);
2620 if (ret < 0) {
2621 error_report("kvm: Failed to enable notify vmexit cap: %s",
2622 strerror(-ret));
2623 return ret;
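/*
 * Packing recap for notify_window_flags above: the notify window
 * occupies bits 63..32 and the enable/user flags sit in the low
 * word, so a window of 0 leaves only the two flag bits set.
 */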
2626 if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
2627 bool r;
2629 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
2630 KVM_MSR_EXIT_REASON_FILTER);
2631 if (ret) {
2632 error_report("Could not enable user space MSRs: %s",
2633 strerror(-ret));
2634 exit(1);
2637 r = kvm_filter_msr(s, MSR_CORE_THREAD_COUNT,
2638 kvm_rdmsr_core_thread_count, NULL);
2639 if (!r) {
2640 error_report("Could not install MSR_CORE_THREAD_COUNT handler: %s",
2641 strerror(-ret));
2642 exit(1);
2646 return 0;
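/*
 * A hedged sketch of serving another read-only MSR from user space
 * with the same mechanism used above for MSR_CORE_THREAD_COUNT:
 * define a handler with this signature and register it with
 * kvm_filter_msr() from kvm_arch_init(). The handler and its value
 * are hypothetical, not part of the original file.
 */
static bool kvm_rdmsr_example(X86CPU *cpu, uint32_t msr, uint64_t *val)
{
    *val = 0x1;  /* hypothetical, statically known MSR contents */
    return true; /* handled; KVM completes the guest RDMSR with *val */
}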
2649 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2651 lhs->selector = rhs->selector;
2652 lhs->base = rhs->base;
2653 lhs->limit = rhs->limit;
2654 lhs->type = 3;
2655 lhs->present = 1;
2656 lhs->dpl = 3;
2657 lhs->db = 0;
2658 lhs->s = 1;
2659 lhs->l = 0;
2660 lhs->g = 0;
2661 lhs->avl = 0;
2662 lhs->unusable = 0;
2665 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2667 unsigned flags = rhs->flags;
2668 lhs->selector = rhs->selector;
2669 lhs->base = rhs->base;
2670 lhs->limit = rhs->limit;
2671 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
2672 lhs->present = (flags & DESC_P_MASK) != 0;
2673 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
2674 lhs->db = (flags >> DESC_B_SHIFT) & 1;
2675 lhs->s = (flags & DESC_S_MASK) != 0;
2676 lhs->l = (flags >> DESC_L_SHIFT) & 1;
2677 lhs->g = (flags & DESC_G_MASK) != 0;
2678 lhs->avl = (flags & DESC_AVL_MASK) != 0;
2679 lhs->unusable = !lhs->present;
2680 lhs->padding = 0;
2683 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
2685 lhs->selector = rhs->selector;
2686 lhs->base = rhs->base;
2687 lhs->limit = rhs->limit;
2688 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
2689 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
2690 (rhs->dpl << DESC_DPL_SHIFT) |
2691 (rhs->db << DESC_B_SHIFT) |
2692 (rhs->s * DESC_S_MASK) |
2693 (rhs->l << DESC_L_SHIFT) |
2694 (rhs->g * DESC_G_MASK) |
2695 (rhs->avl * DESC_AVL_MASK);
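/*
 * A small sketch, assuming only the DESC_* shifts and masks used by
 * set_seg()/get_seg() above: building the flags word for a present,
 * code/data (S=1) segment. Illustrative only; nothing in this file
 * calls it.
 */
static inline uint32_t make_seg_flags(unsigned type, unsigned dpl,
                                      bool l, bool g)
{
    return (type << DESC_TYPE_SHIFT) |
           DESC_P_MASK |
           (dpl << DESC_DPL_SHIFT) |
           DESC_S_MASK |
           ((l ? 1u : 0u) << DESC_L_SHIFT) |
           (g ? DESC_G_MASK : 0);
}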
2698 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
2700 if (set) {
2701 *kvm_reg = *qemu_reg;
2702 } else {
2703 *qemu_reg = *kvm_reg;
2707 static int kvm_getput_regs(X86CPU *cpu, int set)
2709 CPUX86State *env = &cpu->env;
2710 struct kvm_regs regs;
2711 int ret = 0;
2713 if (!set) {
2714 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
2715 if (ret < 0) {
2716 return ret;
2720 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
2721 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
2722 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
2723 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
2724 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
2725 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
2726 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
2727 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
2728 #ifdef TARGET_X86_64
2729 kvm_getput_reg(&regs.r8, &env->regs[8], set);
2730 kvm_getput_reg(&regs.r9, &env->regs[9], set);
2731 kvm_getput_reg(&regs.r10, &env->regs[10], set);
2732 kvm_getput_reg(&regs.r11, &env->regs[11], set);
2733 kvm_getput_reg(&regs.r12, &env->regs[12], set);
2734 kvm_getput_reg(&regs.r13, &env->regs[13], set);
2735 kvm_getput_reg(&regs.r14, &env->regs[14], set);
2736 kvm_getput_reg(&regs.r15, &env->regs[15], set);
2737 #endif
2739 kvm_getput_reg(&regs.rflags, &env->eflags, set);
2740 kvm_getput_reg(&regs.rip, &env->eip, set);
2742 if (set) {
2743 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
2746 return ret;
2749 static int kvm_put_fpu(X86CPU *cpu)
2751 CPUX86State *env = &cpu->env;
2752 struct kvm_fpu fpu;
2753 int i;
2755 memset(&fpu, 0, sizeof fpu);
2756 fpu.fsw = env->fpus & ~(7 << 11);
2757 fpu.fsw |= (env->fpstt & 7) << 11;
2758 fpu.fcw = env->fpuc;
2759 fpu.last_opcode = env->fpop;
2760 fpu.last_ip = env->fpip;
2761 fpu.last_dp = env->fpdp;
2762 for (i = 0; i < 8; ++i) {
2763 fpu.ftwx |= (!env->fptags[i]) << i;
2765 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
2766 for (i = 0; i < CPU_NB_REGS; i++) {
2767 stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
2768 stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
2770 fpu.mxcsr = env->mxcsr;
2772 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
2775 static int kvm_put_xsave(X86CPU *cpu)
2777 CPUX86State *env = &cpu->env;
2778 void *xsave = env->xsave_buf;
2780 if (!has_xsave) {
2781 return kvm_put_fpu(cpu);
2783 x86_cpu_xsave_all_areas(cpu, xsave, env->xsave_buf_len);
2785 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
2788 static int kvm_put_xcrs(X86CPU *cpu)
2790 CPUX86State *env = &cpu->env;
2791 struct kvm_xcrs xcrs = {};
2793 if (!has_xcrs) {
2794 return 0;
2797 xcrs.nr_xcrs = 1;
2798 xcrs.flags = 0;
2799 xcrs.xcrs[0].xcr = 0;
2800 xcrs.xcrs[0].value = env->xcr0;
2801 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
2804 static int kvm_put_sregs(X86CPU *cpu)
2806 CPUX86State *env = &cpu->env;
2807 struct kvm_sregs sregs;
2810 * The interrupt_bitmap is ignored because KVM_SET_SREGS is
2811 * always followed by KVM_SET_VCPU_EVENTS.
2813 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
2815 if ((env->eflags & VM_MASK)) {
2816 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2817 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2818 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2819 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2820 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2821 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2822 } else {
2823 set_seg(&sregs.cs, &env->segs[R_CS]);
2824 set_seg(&sregs.ds, &env->segs[R_DS]);
2825 set_seg(&sregs.es, &env->segs[R_ES]);
2826 set_seg(&sregs.fs, &env->segs[R_FS]);
2827 set_seg(&sregs.gs, &env->segs[R_GS]);
2828 set_seg(&sregs.ss, &env->segs[R_SS]);
2831 set_seg(&sregs.tr, &env->tr);
2832 set_seg(&sregs.ldt, &env->ldt);
2834 sregs.idt.limit = env->idt.limit;
2835 sregs.idt.base = env->idt.base;
2836 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2837 sregs.gdt.limit = env->gdt.limit;
2838 sregs.gdt.base = env->gdt.base;
2839 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2841 sregs.cr0 = env->cr[0];
2842 sregs.cr2 = env->cr[2];
2843 sregs.cr3 = env->cr[3];
2844 sregs.cr4 = env->cr[4];
2846 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2847 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2849 sregs.efer = env->efer;
2851 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
2854 static int kvm_put_sregs2(X86CPU *cpu)
2856 CPUX86State *env = &cpu->env;
2857 struct kvm_sregs2 sregs;
2858 int i;
2860 sregs.flags = 0;
2862 if ((env->eflags & VM_MASK)) {
2863 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2864 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2865 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2866 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2867 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2868 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2869 } else {
2870 set_seg(&sregs.cs, &env->segs[R_CS]);
2871 set_seg(&sregs.ds, &env->segs[R_DS]);
2872 set_seg(&sregs.es, &env->segs[R_ES]);
2873 set_seg(&sregs.fs, &env->segs[R_FS]);
2874 set_seg(&sregs.gs, &env->segs[R_GS]);
2875 set_seg(&sregs.ss, &env->segs[R_SS]);
2878 set_seg(&sregs.tr, &env->tr);
2879 set_seg(&sregs.ldt, &env->ldt);
2881 sregs.idt.limit = env->idt.limit;
2882 sregs.idt.base = env->idt.base;
2883 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2884 sregs.gdt.limit = env->gdt.limit;
2885 sregs.gdt.base = env->gdt.base;
2886 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2888 sregs.cr0 = env->cr[0];
2889 sregs.cr2 = env->cr[2];
2890 sregs.cr3 = env->cr[3];
2891 sregs.cr4 = env->cr[4];
2893 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2894 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2896 sregs.efer = env->efer;
2898 if (env->pdptrs_valid) {
2899 for (i = 0; i < 4; i++) {
2900 sregs.pdptrs[i] = env->pdptrs[i];
2902 sregs.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
2905 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS2, &sregs);
2909 static void kvm_msr_buf_reset(X86CPU *cpu)
2911 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
2914 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
2916 struct kvm_msrs *msrs = cpu->kvm_msr_buf;
2917 void *limit = ((void *)msrs) + MSR_BUF_SIZE;
2918 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
2920 assert((void *)(entry + 1) <= limit);
2922 entry->index = index;
2923 entry->reserved = 0;
2924 entry->data = value;
2925 msrs->nmsrs++;
2928 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
2930 kvm_msr_buf_reset(cpu);
2931 kvm_msr_entry_add(cpu, index, value);
2933 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2936 static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value)
2938 int ret;
2939 struct {
2940 struct kvm_msrs info;
2941 struct kvm_msr_entry entries[1];
2942 } msr_data = {
2943 .info.nmsrs = 1,
2944 .entries[0].index = index,
2947 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
2948 if (ret < 0) {
2949 return ret;
2951 assert(ret == 1);
2952 *value = msr_data.entries[0].data;
2953 return ret;
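/*
 * Usage note: KVM_GET_MSRS returns the number of MSRs it processed,
 * so a return value of 1 from kvm_get_one_msr() means the read
 * succeeded. For example (MSR choice illustrative):
 *
 *     uint64_t rev;
 *     if (kvm_get_one_msr(cpu, MSR_IA32_UCODE_REV, &rev) == 1) {
 *         ... rev now holds the microcode revision ...
 *     }
 */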
2955 void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
2957 int ret;
2959 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
2960 assert(ret == 1);
2963 static int kvm_put_tscdeadline_msr(X86CPU *cpu)
2965 CPUX86State *env = &cpu->env;
2966 int ret;
2968 if (!has_msr_tsc_deadline) {
2969 return 0;
2972 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
2973 if (ret < 0) {
2974 return ret;
2977 assert(ret == 1);
2978 return 0;
2982 * Provide a separate write service for the feature control MSR in order to
2983 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
2984 * before writing any other state because forcibly leaving nested mode
2985 * invalidates the VCPU state.
2987 static int kvm_put_msr_feature_control(X86CPU *cpu)
2989 int ret;
2991 if (!has_msr_feature_control) {
2992 return 0;
2995 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
2996 cpu->env.msr_ia32_feature_control);
2997 if (ret < 0) {
2998 return ret;
3001 assert(ret == 1);
3002 return 0;
3005 static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
3007 uint32_t default1, can_be_one, can_be_zero;
3008 uint32_t must_be_one;
3010 switch (index) {
3011 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3012 default1 = 0x00000016;
3013 break;
3014 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3015 default1 = 0x0401e172;
3016 break;
3017 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3018 default1 = 0x000011ff;
3019 break;
3020 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3021 default1 = 0x00036dff;
3022 break;
3023 case MSR_IA32_VMX_PROCBASED_CTLS2:
3024 default1 = 0;
3025 break;
3026 default:
3027 abort();
3030 /* If a feature bit is set, the control can be either set or clear.
3031 * Otherwise the value is limited to either 0 or 1 by default1.
3033 can_be_one = features | default1;
3034 can_be_zero = features | ~default1;
3035 must_be_one = ~can_be_zero;
3038 * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
3039 * Bit 32:63 -> 1 if the control bit can be one.
3041 return must_be_one | (((uint64_t)can_be_one) << 32);
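/*
 * Worked example: for MSR_IA32_VMX_TRUE_PINBASED_CTLS with
 * features == 0, default1 == 0x00000016, so can_be_one == 0x16,
 * can_be_zero == ~0x16 and must_be_one == 0x16. The function returns
 * 0x0000001600000016: the default1 bits appear as "must be one" in
 * the low word and "can be one" in the high word, matching the
 * allowed-0/allowed-1 convention of the VMX capability MSRs.
 */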
3044 static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
3046 uint64_t kvm_vmx_basic =
3047 kvm_arch_get_supported_msr_feature(kvm_state,
3048 MSR_IA32_VMX_BASIC);
3050 if (!kvm_vmx_basic) {
3051 /* If the kernel doesn't support the VMX feature (kvm_intel.nested=0),
3052 * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
3054 return;
3057 uint64_t kvm_vmx_misc =
3058 kvm_arch_get_supported_msr_feature(kvm_state,
3059 MSR_IA32_VMX_MISC);
3060 uint64_t kvm_vmx_ept_vpid =
3061 kvm_arch_get_supported_msr_feature(kvm_state,
3062 MSR_IA32_VMX_EPT_VPID_CAP);
3065 * If the guest is 64-bit, a value of 1 is allowed for the host address
3066 * space size vmexit control.
3068 uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
3069 ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;
3072 * Bits 0-30, 32-44 and 50-53 come from the host. KVM should
3073 * not change them for backwards compatibility.
3075 uint64_t fixed_vmx_basic = kvm_vmx_basic &
3076 (MSR_VMX_BASIC_VMCS_REVISION_MASK |
3077 MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
3078 MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);
3081 * Same for bits 0-4 and 25-27. Bits 16-24 (CR3 target count) can
3082 * change in the future but are always zero for now, clear them to be
3083 * future proof. Bits 32-63 in theory could change, though KVM does
3084 * not support dual-monitor treatment and probably never will; mask
3085 * them out as well.
3087 uint64_t fixed_vmx_misc = kvm_vmx_misc &
3088 (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
3089 MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);
3092 * EPT memory types should not change either, so we do not bother
3093 * adding features for them.
3095 uint64_t fixed_vmx_ept_mask =
3096 (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
3097 MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
3098 uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;
3100 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3101 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
3102 f[FEAT_VMX_PROCBASED_CTLS]));
3103 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3104 make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
3105 f[FEAT_VMX_PINBASED_CTLS]));
3106 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
3107 make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
3108 f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
3109 kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3110 make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
3111 f[FEAT_VMX_ENTRY_CTLS]));
3112 kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
3113 make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
3114 f[FEAT_VMX_SECONDARY_CTLS]));
3115 kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
3116 f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
3117 kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
3118 f[FEAT_VMX_BASIC] | fixed_vmx_basic);
3119 kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
3120 f[FEAT_VMX_MISC] | fixed_vmx_misc);
3121 if (has_msr_vmx_vmfunc) {
3122 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
3126 * Just to be safe, write these with constant values. The CRn_FIXED1
3127 * MSRs are generated by KVM based on the vCPU's CPUID.
3129 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
3130 CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
3131 kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
3132 CR4_VMXE_MASK);
3134 if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) {
3135 /* TSC multiplier (0x2032). */
3136 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32);
3137 } else {
3138 /* Preemption timer (0x482E). */
3139 kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x2E);
3143 static void kvm_msr_entry_add_perf(X86CPU *cpu, FeatureWordArray f)
3145 uint64_t kvm_perf_cap =
3146 kvm_arch_get_supported_msr_feature(kvm_state,
3147 MSR_IA32_PERF_CAPABILITIES);
3149 if (kvm_perf_cap) {
3150 kvm_msr_entry_add(cpu, MSR_IA32_PERF_CAPABILITIES,
3151 kvm_perf_cap & f[FEAT_PERF_CAPABILITIES]);
3155 static int kvm_buf_set_msrs(X86CPU *cpu)
3157 int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
3158 if (ret < 0) {
3159 return ret;
3162 if (ret < cpu->kvm_msr_buf->nmsrs) {
3163 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3164 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
3165 (uint32_t)e->index, (uint64_t)e->data);
3168 assert(ret == cpu->kvm_msr_buf->nmsrs);
3169 return 0;
3172 static void kvm_init_msrs(X86CPU *cpu)
3174 CPUX86State *env = &cpu->env;
3176 kvm_msr_buf_reset(cpu);
3177 if (has_msr_arch_capabs) {
3178 kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
3179 env->features[FEAT_ARCH_CAPABILITIES]);
3182 if (has_msr_core_capabs) {
3183 kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
3184 env->features[FEAT_CORE_CAPABILITY]);
3187 if (has_msr_perf_capabs && cpu->enable_pmu) {
3188 kvm_msr_entry_add_perf(cpu, env->features);
3191 if (has_msr_ucode_rev) {
3192 kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
3196 * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
3197 * all kernels with MSR features should have them.
3199 if (kvm_feature_msrs && cpu_has_vmx(env)) {
3200 kvm_msr_entry_add_vmx(cpu, env->features);
3203 assert(kvm_buf_set_msrs(cpu) == 0);
3206 static int kvm_put_msrs(X86CPU *cpu, int level)
3208 CPUX86State *env = &cpu->env;
3209 int i;
3211 kvm_msr_buf_reset(cpu);
3213 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
3214 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
3215 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
3216 kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
3217 if (has_msr_star) {
3218 kvm_msr_entry_add(cpu, MSR_STAR, env->star);
3220 if (has_msr_hsave_pa) {
3221 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
3223 if (has_msr_tsc_aux) {
3224 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
3226 if (has_msr_tsc_adjust) {
3227 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
3229 if (has_msr_misc_enable) {
3230 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
3231 env->msr_ia32_misc_enable);
3233 if (has_msr_smbase) {
3234 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
3236 if (has_msr_smi_count) {
3237 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
3239 if (has_msr_pkrs) {
3240 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, env->pkrs);
3242 if (has_msr_bndcfgs) {
3243 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
3245 if (has_msr_xss) {
3246 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
3248 if (has_msr_umwait) {
3249 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
3251 if (has_msr_spec_ctrl) {
3252 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
3254 if (has_tsc_scale_msr) {
3255 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr);
3258 if (has_msr_tsx_ctrl) {
3259 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
3261 if (has_msr_virt_ssbd) {
3262 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
3265 #ifdef TARGET_X86_64
3266 if (lm_capable_kernel) {
3267 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
3268 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
3269 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
3270 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
3272 #endif
3275 * The following MSRs have side effects on the guest or are too heavy
3276 * for normal writeback. Limit them to reset or full state updates.
3278 if (level >= KVM_PUT_RESET_STATE) {
3279 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
3280 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
3281 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
3282 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
3283 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
3285 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
3286 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
3288 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
3289 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
3291 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
3292 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
3295 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
3296 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
3299 if (has_architectural_pmu_version > 0) {
3300 if (has_architectural_pmu_version > 1) {
3301 /* Stop the counter. */
3302 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
3303 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
3306 /* Set the counter values. */
3307 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
3308 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
3309 env->msr_fixed_counters[i]);
3311 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
3312 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
3313 env->msr_gp_counters[i]);
3314 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
3315 env->msr_gp_evtsel[i]);
3317 if (has_architectural_pmu_version > 1) {
3318 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
3319 env->msr_global_status);
3320 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
3321 env->msr_global_ovf_ctrl);
3323 /* Now start the PMU. */
3324 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
3325 env->msr_fixed_ctr_ctrl);
3326 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
3327 env->msr_global_ctrl);
3331 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
3332 * only sync them to KVM on the first cpu
3334 if (current_cpu == first_cpu) {
3335 if (has_msr_hv_hypercall) {
3336 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
3337 env->msr_hv_guest_os_id);
3338 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
3339 env->msr_hv_hypercall);
3341 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3342 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
3343 env->msr_hv_tsc);
3345 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3346 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
3347 env->msr_hv_reenlightenment_control);
3348 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
3349 env->msr_hv_tsc_emulation_control);
3350 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
3351 env->msr_hv_tsc_emulation_status);
3353 #ifdef CONFIG_SYNDBG
3354 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) &&
3355 has_msr_hv_syndbg_options) {
3356 kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS,
3357 hyperv_syndbg_query_options());
3359 #endif
3361 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3362 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
3363 env->msr_hv_vapic);
3365 if (has_msr_hv_crash) {
3366 int j;
3368 for (j = 0; j < HV_CRASH_PARAMS; j++)
3369 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
3370 env->msr_hv_crash_params[j]);
3372 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
3374 if (has_msr_hv_runtime) {
3375 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
3377 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
3378 && hv_vpindex_settable) {
3379 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
3380 hyperv_vp_index(CPU(cpu)));
3382 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3383 int j;
3385 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
3387 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
3388 env->msr_hv_synic_control);
3389 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
3390 env->msr_hv_synic_evt_page);
3391 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
3392 env->msr_hv_synic_msg_page);
3394 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
3395 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
3396 env->msr_hv_synic_sint[j]);
3399 if (has_msr_hv_stimer) {
3400 int j;
3402 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
3403 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
3404 env->msr_hv_stimer_config[j]);
3407 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
3408 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
3409 env->msr_hv_stimer_count[j]);
3412 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3413 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
3415 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
3416 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
3417 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
3418 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
3419 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
3420 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
3421 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
3422 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
3423 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
3424 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
3425 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
3426 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
3427 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3428 /* The CPU GPs if we write to a bit above the physical limit of
3429 * the host CPU (and KVM emulates that)
3431 uint64_t mask = env->mtrr_var[i].mask;
3432 mask &= phys_mask;
3434 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
3435 env->mtrr_var[i].base);
3436 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
3439 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3440 int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
3441 0x14, 1, R_EAX) & 0x7;
3443 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
3444 env->msr_rtit_ctrl);
3445 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
3446 env->msr_rtit_status);
3447 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
3448 env->msr_rtit_output_base);
3449 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
3450 env->msr_rtit_output_mask);
3451 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
3452 env->msr_rtit_cr3_match);
3453 for (i = 0; i < addr_num; i++) {
3454 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
3455 env->msr_rtit_addrs[i]);
3459 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
3460 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0,
3461 env->msr_ia32_sgxlepubkeyhash[0]);
3462 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1,
3463 env->msr_ia32_sgxlepubkeyhash[1]);
3464 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2,
3465 env->msr_ia32_sgxlepubkeyhash[2]);
3466 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3,
3467 env->msr_ia32_sgxlepubkeyhash[3]);
3470 if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
3471 kvm_msr_entry_add(cpu, MSR_IA32_XFD,
3472 env->msr_xfd);
3473 kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR,
3474 env->msr_xfd_err);
3477 if (kvm_enabled() && cpu->enable_pmu &&
3478 (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
3479 uint64_t depth;
3480 int i, ret;
3483 * Only migrate Arch LBR state when the host Arch LBR depth
3484 * equals that of the source guest; this avoids a guest/host
3485 * configuration mismatch for the MSR and the unexpected
3486 * misbehavior that would follow.
3488 ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
3490 if (ret == 1 && !!depth && depth == env->msr_lbr_depth) {
3491 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
3492 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
3494 for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
3495 if (!env->lbr_records[i].from) {
3496 continue;
3498 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i,
3499 env->lbr_records[i].from);
3500 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i,
3501 env->lbr_records[i].to);
3502 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i,
3503 env->lbr_records[i].info);
3508 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
3509 * kvm_put_msr_feature_control. */
3512 if (env->mcg_cap) {
3513 int i;
3515 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
3516 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
3517 if (has_msr_mcg_ext_ctl) {
3518 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
3520 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3521 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
3525 return kvm_buf_set_msrs(cpu);
3529 static int kvm_get_fpu(X86CPU *cpu)
3531 CPUX86State *env = &cpu->env;
3532 struct kvm_fpu fpu;
3533 int i, ret;
3535 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
3536 if (ret < 0) {
3537 return ret;
3540 env->fpstt = (fpu.fsw >> 11) & 7;
3541 env->fpus = fpu.fsw;
3542 env->fpuc = fpu.fcw;
3543 env->fpop = fpu.last_opcode;
3544 env->fpip = fpu.last_ip;
3545 env->fpdp = fpu.last_dp;
3546 for (i = 0; i < 8; ++i) {
3547 env->fptags[i] = !((fpu.ftwx >> i) & 1);
3549 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
3550 for (i = 0; i < CPU_NB_REGS; i++) {
3551 env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
3552 env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
3554 env->mxcsr = fpu.mxcsr;
3556 return 0;
3559 static int kvm_get_xsave(X86CPU *cpu)
3561 CPUX86State *env = &cpu->env;
3562 void *xsave = env->xsave_buf;
3563 int type, ret;
3565 if (!has_xsave) {
3566 return kvm_get_fpu(cpu);
3569 type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE;
3570 ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave);
3571 if (ret < 0) {
3572 return ret;
3574 x86_cpu_xrstor_all_areas(cpu, xsave, env->xsave_buf_len);
3576 return 0;
3579 static int kvm_get_xcrs(X86CPU *cpu)
3581 CPUX86State *env = &cpu->env;
3582 int i, ret;
3583 struct kvm_xcrs xcrs;
3585 if (!has_xcrs) {
3586 return 0;
3589 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
3590 if (ret < 0) {
3591 return ret;
3594 for (i = 0; i < xcrs.nr_xcrs; i++) {
3595 /* Only support xcr0 now */
3596 if (xcrs.xcrs[i].xcr == 0) {
3597 env->xcr0 = xcrs.xcrs[i].value;
3598 break;
3601 return 0;
3604 static int kvm_get_sregs(X86CPU *cpu)
3606 CPUX86State *env = &cpu->env;
3607 struct kvm_sregs sregs;
3608 int ret;
3610 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
3611 if (ret < 0) {
3612 return ret;
3616 * The interrupt_bitmap is ignored because KVM_GET_SREGS is
3617 * always preceded by KVM_GET_VCPU_EVENTS.
3620 get_seg(&env->segs[R_CS], &sregs.cs);
3621 get_seg(&env->segs[R_DS], &sregs.ds);
3622 get_seg(&env->segs[R_ES], &sregs.es);
3623 get_seg(&env->segs[R_FS], &sregs.fs);
3624 get_seg(&env->segs[R_GS], &sregs.gs);
3625 get_seg(&env->segs[R_SS], &sregs.ss);
3627 get_seg(&env->tr, &sregs.tr);
3628 get_seg(&env->ldt, &sregs.ldt);
3630 env->idt.limit = sregs.idt.limit;
3631 env->idt.base = sregs.idt.base;
3632 env->gdt.limit = sregs.gdt.limit;
3633 env->gdt.base = sregs.gdt.base;
3635 env->cr[0] = sregs.cr0;
3636 env->cr[2] = sregs.cr2;
3637 env->cr[3] = sregs.cr3;
3638 env->cr[4] = sregs.cr4;
3640 env->efer = sregs.efer;
3642 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
3643 x86_update_hflags(env);
3645 return 0;
3648 static int kvm_get_sregs2(X86CPU *cpu)
3650 CPUX86State *env = &cpu->env;
3651 struct kvm_sregs2 sregs;
3652 int i, ret;
3654 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS2, &sregs);
3655 if (ret < 0) {
3656 return ret;
3659 get_seg(&env->segs[R_CS], &sregs.cs);
3660 get_seg(&env->segs[R_DS], &sregs.ds);
3661 get_seg(&env->segs[R_ES], &sregs.es);
3662 get_seg(&env->segs[R_FS], &sregs.fs);
3663 get_seg(&env->segs[R_GS], &sregs.gs);
3664 get_seg(&env->segs[R_SS], &sregs.ss);
3666 get_seg(&env->tr, &sregs.tr);
3667 get_seg(&env->ldt, &sregs.ldt);
3669 env->idt.limit = sregs.idt.limit;
3670 env->idt.base = sregs.idt.base;
3671 env->gdt.limit = sregs.gdt.limit;
3672 env->gdt.base = sregs.gdt.base;
3674 env->cr[0] = sregs.cr0;
3675 env->cr[2] = sregs.cr2;
3676 env->cr[3] = sregs.cr3;
3677 env->cr[4] = sregs.cr4;
3679 env->efer = sregs.efer;
3681 env->pdptrs_valid = sregs.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
3683 if (env->pdptrs_valid) {
3684 for (i = 0; i < 4; i++) {
3685 env->pdptrs[i] = sregs.pdptrs[i];
3689 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
3690 x86_update_hflags(env);
3692 return 0;
3695 static int kvm_get_msrs(X86CPU *cpu)
3697 CPUX86State *env = &cpu->env;
3698 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
3699 int ret, i;
3700 uint64_t mtrr_top_bits;
3702 kvm_msr_buf_reset(cpu);
3704 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
3705 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
3706 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
3707 kvm_msr_entry_add(cpu, MSR_PAT, 0);
3708 if (has_msr_star) {
3709 kvm_msr_entry_add(cpu, MSR_STAR, 0);
3711 if (has_msr_hsave_pa) {
3712 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
3714 if (has_msr_tsc_aux) {
3715 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
3717 if (has_msr_tsc_adjust) {
3718 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
3720 if (has_msr_tsc_deadline) {
3721 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
3723 if (has_msr_misc_enable) {
3724 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
3726 if (has_msr_smbase) {
3727 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
3729 if (has_msr_smi_count) {
3730 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
3732 if (has_msr_feature_control) {
3733 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
3735 if (has_msr_pkrs) {
3736 kvm_msr_entry_add(cpu, MSR_IA32_PKRS, 0);
3738 if (has_msr_bndcfgs) {
3739 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
3741 if (has_msr_xss) {
3742 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
3744 if (has_msr_umwait) {
3745 kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
3747 if (has_msr_spec_ctrl) {
3748 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
3750 if (has_tsc_scale_msr) {
3751 kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0);
3754 if (has_msr_tsx_ctrl) {
3755 kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
3757 if (has_msr_virt_ssbd) {
3758 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
3760 if (!env->tsc_valid) {
3761 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
3762 env->tsc_valid = !runstate_is_running();
3765 #ifdef TARGET_X86_64
3766 if (lm_capable_kernel) {
3767 kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
3768 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
3769 kvm_msr_entry_add(cpu, MSR_FMASK, 0);
3770 kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
3772 #endif
3773 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
3774 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
3775 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF_INT)) {
3776 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
3778 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
3779 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
3781 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
3782 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
3784 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
3785 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
3787 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
3788 kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
3790 if (has_architectural_pmu_version > 0) {
3791 if (has_architectural_pmu_version > 1) {
3792 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
3793 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
3794 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
3795 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
3797 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
3798 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
3800 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
3801 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
3802 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
3806 if (env->mcg_cap) {
3807 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
3808 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
3809 if (has_msr_mcg_ext_ctl) {
3810 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
3812 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3813 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
3817 if (has_msr_hv_hypercall) {
3818 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
3819 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
3821 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3822 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
3824 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3825 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
3827 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3828 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
3829 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
3830 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
3832 if (has_msr_hv_syndbg_options) {
3833 kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0);
3835 if (has_msr_hv_crash) {
3836 int j;
3838 for (j = 0; j < HV_CRASH_PARAMS; j++) {
3839 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
3842 if (has_msr_hv_runtime) {
3843 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
3845 if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3846 uint32_t msr;
3848 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
3849 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
3850 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
3851 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
3852 kvm_msr_entry_add(cpu, msr, 0);
3855 if (has_msr_hv_stimer) {
3856 uint32_t msr;
3858 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
3859 msr++) {
3860 kvm_msr_entry_add(cpu, msr, 0);
3863 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3864 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
3865 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
3866 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
3867 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
3868 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
3869 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
3870 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
3871 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
3872 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
3873 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
3874 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
3875 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
3876 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3877 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
3878 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
3882 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3883 int addr_num =
3884 kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
3886 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
3887 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
3888 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
3889 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
3890 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
3891 for (i = 0; i < addr_num; i++) {
3892 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
3896 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) {
3897 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0);
3898 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0);
3899 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0);
3900 kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0);
3903 if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) {
3904 kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0);
3905 kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0);
3908 if (kvm_enabled() && cpu->enable_pmu &&
3909 (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
3910 uint64_t depth;
3911 int i, ret;
3913 ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
3914 if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
3915 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
3916 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);
3918 for (i = 0; i < ARCH_LBR_NR_ENTRIES; i++) {
3919 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_FROM_0 + i, 0);
3920 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_TO_0 + i, 0);
3921 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_INFO_0 + i, 0);
3926 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
3927 if (ret < 0) {
3928 return ret;
3931 if (ret < cpu->kvm_msr_buf->nmsrs) {
3932 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3933 error_report("error: failed to get MSR 0x%" PRIx32,
3934 (uint32_t)e->index);
3937 assert(ret == cpu->kvm_msr_buf->nmsrs);
3939 * MTRR masks: Each mask consists of 5 parts
3940 * a 10..0: must be zero
3941 * b 11: valid bit
3942 * c n-1..12: actual mask bits
3943 * d 51..n: reserved, must be zero
3944 * e 63..52: reserved, must be zero
3946 * 'n' is the number of physical bits supported by the CPU and is
3947 * apparently always <= 52. We know our 'n' but don't know what
3948 * the destination's 'n' is; it might be smaller, in which case
3949 * it masks (c) on loading. It might be larger, in which case
3950 * we fill 'd' so that d..c is consistent irrespective of the 'n'
3951 * we're migrating to.
3954 if (cpu->fill_mtrr_mask) {
3955 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
3956 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
3957 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
3958 } else {
3959 mtrr_top_bits = 0;
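/*
 * Worked example, assuming cpu->phys_bits == 40: mtrr_top_bits ==
 * MAKE_64BIT_MASK(40, 12) == 0x000fff0000000000, i.e. bits 51..40.
 * Those bits are the part (d) described above, filled in so the mask
 * stays consistent on a destination whose 'n' is larger than ours.
 */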
3962 for (i = 0; i < ret; i++) {
3963 uint32_t index = msrs[i].index;
3964 switch (index) {
3965 case MSR_IA32_SYSENTER_CS:
3966 env->sysenter_cs = msrs[i].data;
3967 break;
3968 case MSR_IA32_SYSENTER_ESP:
3969 env->sysenter_esp = msrs[i].data;
3970 break;
3971 case MSR_IA32_SYSENTER_EIP:
3972 env->sysenter_eip = msrs[i].data;
3973 break;
3974 case MSR_PAT:
3975 env->pat = msrs[i].data;
3976 break;
3977 case MSR_STAR:
3978 env->star = msrs[i].data;
3979 break;
3980 #ifdef TARGET_X86_64
3981 case MSR_CSTAR:
3982 env->cstar = msrs[i].data;
3983 break;
3984 case MSR_KERNELGSBASE:
3985 env->kernelgsbase = msrs[i].data;
3986 break;
3987 case MSR_FMASK:
3988 env->fmask = msrs[i].data;
3989 break;
3990 case MSR_LSTAR:
3991 env->lstar = msrs[i].data;
3992 break;
3993 #endif
3994 case MSR_IA32_TSC:
3995 env->tsc = msrs[i].data;
3996 break;
3997 case MSR_TSC_AUX:
3998 env->tsc_aux = msrs[i].data;
3999 break;
4000 case MSR_TSC_ADJUST:
4001 env->tsc_adjust = msrs[i].data;
4002 break;
4003 case MSR_IA32_TSCDEADLINE:
4004 env->tsc_deadline = msrs[i].data;
4005 break;
4006 case MSR_VM_HSAVE_PA:
4007 env->vm_hsave = msrs[i].data;
4008 break;
4009 case MSR_KVM_SYSTEM_TIME:
4010 env->system_time_msr = msrs[i].data;
4011 break;
4012 case MSR_KVM_WALL_CLOCK:
4013 env->wall_clock_msr = msrs[i].data;
4014 break;
4015 case MSR_MCG_STATUS:
4016 env->mcg_status = msrs[i].data;
4017 break;
4018 case MSR_MCG_CTL:
4019 env->mcg_ctl = msrs[i].data;
4020 break;
4021 case MSR_MCG_EXT_CTL:
4022 env->mcg_ext_ctl = msrs[i].data;
4023 break;
4024 case MSR_IA32_MISC_ENABLE:
4025 env->msr_ia32_misc_enable = msrs[i].data;
4026 break;
4027 case MSR_IA32_SMBASE:
4028 env->smbase = msrs[i].data;
4029 break;
4030 case MSR_SMI_COUNT:
4031 env->msr_smi_count = msrs[i].data;
4032 break;
4033 case MSR_IA32_FEATURE_CONTROL:
4034 env->msr_ia32_feature_control = msrs[i].data;
4035 break;
4036 case MSR_IA32_BNDCFGS:
4037 env->msr_bndcfgs = msrs[i].data;
4038 break;
4039 case MSR_IA32_XSS:
4040 env->xss = msrs[i].data;
4041 break;
4042 case MSR_IA32_UMWAIT_CONTROL:
4043 env->umwait = msrs[i].data;
4044 break;
4045 case MSR_IA32_PKRS:
4046 env->pkrs = msrs[i].data;
4047 break;
4048 default:
4049 if (msrs[i].index >= MSR_MC0_CTL &&
4050 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
4051 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
4053 break;
4054 case MSR_KVM_ASYNC_PF_EN:
4055 env->async_pf_en_msr = msrs[i].data;
4056 break;
4057 case MSR_KVM_ASYNC_PF_INT:
4058 env->async_pf_int_msr = msrs[i].data;
4059 break;
4060 case MSR_KVM_PV_EOI_EN:
4061 env->pv_eoi_en_msr = msrs[i].data;
4062 break;
4063 case MSR_KVM_STEAL_TIME:
4064 env->steal_time_msr = msrs[i].data;
4065 break;
4066 case MSR_KVM_POLL_CONTROL: {
4067 env->poll_control_msr = msrs[i].data;
4068 break;
4070 case MSR_CORE_PERF_FIXED_CTR_CTRL:
4071 env->msr_fixed_ctr_ctrl = msrs[i].data;
4072 break;
4073 case MSR_CORE_PERF_GLOBAL_CTRL:
4074 env->msr_global_ctrl = msrs[i].data;
4075 break;
4076 case MSR_CORE_PERF_GLOBAL_STATUS:
4077 env->msr_global_status = msrs[i].data;
4078 break;
4079 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
4080 env->msr_global_ovf_ctrl = msrs[i].data;
4081 break;
4082 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
4083 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
4084 break;
4085 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
4086 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
4087 break;
4088 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
4089 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
4090 break;
4091 case HV_X64_MSR_HYPERCALL:
4092 env->msr_hv_hypercall = msrs[i].data;
4093 break;
4094 case HV_X64_MSR_GUEST_OS_ID:
4095 env->msr_hv_guest_os_id = msrs[i].data;
4096 break;
4097 case HV_X64_MSR_APIC_ASSIST_PAGE:
4098 env->msr_hv_vapic = msrs[i].data;
4099 break;
4100 case HV_X64_MSR_REFERENCE_TSC:
4101 env->msr_hv_tsc = msrs[i].data;
4102 break;
4103 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
4104 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
4105 break;
4106 case HV_X64_MSR_VP_RUNTIME:
4107 env->msr_hv_runtime = msrs[i].data;
4108 break;
4109 case HV_X64_MSR_SCONTROL:
4110 env->msr_hv_synic_control = msrs[i].data;
4111 break;
4112 case HV_X64_MSR_SIEFP:
4113 env->msr_hv_synic_evt_page = msrs[i].data;
4114 break;
4115 case HV_X64_MSR_SIMP:
4116 env->msr_hv_synic_msg_page = msrs[i].data;
4117 break;
4118 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
4119 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
4120 break;
4121 case HV_X64_MSR_STIMER0_CONFIG:
4122 case HV_X64_MSR_STIMER1_CONFIG:
4123 case HV_X64_MSR_STIMER2_CONFIG:
4124 case HV_X64_MSR_STIMER3_CONFIG:
4125 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
4126 msrs[i].data;
4127 break;
4128 case HV_X64_MSR_STIMER0_COUNT:
4129 case HV_X64_MSR_STIMER1_COUNT:
4130 case HV_X64_MSR_STIMER2_COUNT:
4131 case HV_X64_MSR_STIMER3_COUNT:
4132 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
4133 msrs[i].data;
4134 break;
4135 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
4136 env->msr_hv_reenlightenment_control = msrs[i].data;
4137 break;
4138 case HV_X64_MSR_TSC_EMULATION_CONTROL:
4139 env->msr_hv_tsc_emulation_control = msrs[i].data;
4140 break;
4141 case HV_X64_MSR_TSC_EMULATION_STATUS:
4142 env->msr_hv_tsc_emulation_status = msrs[i].data;
4143 break;
4144 case HV_X64_MSR_SYNDBG_OPTIONS:
4145 env->msr_hv_syndbg_options = msrs[i].data;
4146 break;
4147 case MSR_MTRRdefType:
4148 env->mtrr_deftype = msrs[i].data;
4149 break;
4150 case MSR_MTRRfix64K_00000:
4151 env->mtrr_fixed[0] = msrs[i].data;
4152 break;
4153 case MSR_MTRRfix16K_80000:
4154 env->mtrr_fixed[1] = msrs[i].data;
4155 break;
4156 case MSR_MTRRfix16K_A0000:
4157 env->mtrr_fixed[2] = msrs[i].data;
4158 break;
4159 case MSR_MTRRfix4K_C0000:
4160 env->mtrr_fixed[3] = msrs[i].data;
4161 break;
4162 case MSR_MTRRfix4K_C8000:
4163 env->mtrr_fixed[4] = msrs[i].data;
4164 break;
4165 case MSR_MTRRfix4K_D0000:
4166 env->mtrr_fixed[5] = msrs[i].data;
4167 break;
4168 case MSR_MTRRfix4K_D8000:
4169 env->mtrr_fixed[6] = msrs[i].data;
4170 break;
4171 case MSR_MTRRfix4K_E0000:
4172 env->mtrr_fixed[7] = msrs[i].data;
4173 break;
4174 case MSR_MTRRfix4K_E8000:
4175 env->mtrr_fixed[8] = msrs[i].data;
4176 break;
4177 case MSR_MTRRfix4K_F0000:
4178 env->mtrr_fixed[9] = msrs[i].data;
4179 break;
4180 case MSR_MTRRfix4K_F8000:
4181 env->mtrr_fixed[10] = msrs[i].data;
4182 break;
4183 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
4184 if (index & 1) {
4185 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
4186 mtrr_top_bits;
4187 } else {
4188 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
4190 break;
4191 case MSR_IA32_SPEC_CTRL:
4192 env->spec_ctrl = msrs[i].data;
4193 break;
4194 case MSR_AMD64_TSC_RATIO:
4195 env->amd_tsc_scale_msr = msrs[i].data;
4196 break;
4197 case MSR_IA32_TSX_CTRL:
4198 env->tsx_ctrl = msrs[i].data;
4199 break;
4200 case MSR_VIRT_SSBD:
4201 env->virt_ssbd = msrs[i].data;
4202 break;
4203 case MSR_IA32_RTIT_CTL:
4204 env->msr_rtit_ctrl = msrs[i].data;
4205 break;
4206 case MSR_IA32_RTIT_STATUS:
4207 env->msr_rtit_status = msrs[i].data;
4208 break;
4209 case MSR_IA32_RTIT_OUTPUT_BASE:
4210 env->msr_rtit_output_base = msrs[i].data;
4211 break;
4212 case MSR_IA32_RTIT_OUTPUT_MASK:
4213 env->msr_rtit_output_mask = msrs[i].data;
4214 break;
4215 case MSR_IA32_RTIT_CR3_MATCH:
4216 env->msr_rtit_cr3_match = msrs[i].data;
4217 break;
4218 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
4219 env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
4220 break;
4221 case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
4222 env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] =
4223 msrs[i].data;
4224 break;
4225 case MSR_IA32_XFD:
4226 env->msr_xfd = msrs[i].data;
4227 break;
4228 case MSR_IA32_XFD_ERR:
4229 env->msr_xfd_err = msrs[i].data;
4230 break;
4231 case MSR_ARCH_LBR_CTL:
4232 env->msr_lbr_ctl = msrs[i].data;
4233 break;
4234 case MSR_ARCH_LBR_DEPTH:
4235 env->msr_lbr_depth = msrs[i].data;
4236 break;
4237 case MSR_ARCH_LBR_FROM_0 ... MSR_ARCH_LBR_FROM_0 + 31:
4238 env->lbr_records[index - MSR_ARCH_LBR_FROM_0].from = msrs[i].data;
4239 break;
4240 case MSR_ARCH_LBR_TO_0 ... MSR_ARCH_LBR_TO_0 + 31:
4241 env->lbr_records[index - MSR_ARCH_LBR_TO_0].to = msrs[i].data;
4242 break;
4243 case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31:
4244 env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data;
4245 break;
4249 return 0;
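/*
 * Layout note (an illustrative sketch, not upstream code): the
 * kvm_msr_buf filled above follows the kernel's variable-length
 * KVM_GET_MSRS argument, roughly:
 *
 *     struct kvm_msrs {
 *         __u32 nmsrs;                    // number of entries
 *         __u32 pad;
 *         struct kvm_msr_entry entries[]; // { __u32 index; __u32 reserved; __u64 data; }
 *     };
 *
 * The ioctl returns how many entries were processed; a short count
 * identifies the first rejected MSR, which is what the error_report()
 * above prints.
 */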
4252 static int kvm_put_mp_state(X86CPU *cpu)
4254 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
4256 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
4259 static int kvm_get_mp_state(X86CPU *cpu)
4261 CPUState *cs = CPU(cpu);
4262 CPUX86State *env = &cpu->env;
4263 struct kvm_mp_state mp_state;
4264 int ret;
4266 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
4267 if (ret < 0) {
4268 return ret;
4270 env->mp_state = mp_state.mp_state;
4271 if (kvm_irqchip_in_kernel()) {
4272 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
4274 return 0;
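/*
 * Orientation note (values from the KVM API, listed here for clarity):
 * mp_state.mp_state is one of KVM_MP_STATE_RUNNABLE,
 * KVM_MP_STATE_UNINITIALIZED, KVM_MP_STATE_INIT_RECEIVED,
 * KVM_MP_STATE_HALTED or KVM_MP_STATE_SIPI_RECEIVED. With an in-kernel
 * irqchip the HALTED state is owned by the kernel, hence the
 * cs->halted sync above.
 */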
4277 static int kvm_get_apic(X86CPU *cpu)
4279 DeviceState *apic = cpu->apic_state;
4280 struct kvm_lapic_state kapic;
4281 int ret;
4283 if (apic && kvm_irqchip_in_kernel()) {
4284 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
4285 if (ret < 0) {
4286 return ret;
4289 kvm_get_apic_state(apic, &kapic);
4291 return 0;
4294 static int kvm_put_vcpu_events(X86CPU *cpu, int level)
4296 CPUState *cs = CPU(cpu);
4297 CPUX86State *env = &cpu->env;
4298 struct kvm_vcpu_events events = {};
4300 if (!kvm_has_vcpu_events()) {
4301 return 0;
4304 events.flags = 0;
4306 if (has_exception_payload) {
4307 events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
4308 events.exception.pending = env->exception_pending;
4309 events.exception_has_payload = env->exception_has_payload;
4310 events.exception_payload = env->exception_payload;
4312 events.exception.nr = env->exception_nr;
4313 events.exception.injected = env->exception_injected;
4314 events.exception.has_error_code = env->has_error_code;
4315 events.exception.error_code = env->error_code;
4317 events.interrupt.injected = (env->interrupt_injected >= 0);
4318 events.interrupt.nr = env->interrupt_injected;
4319 events.interrupt.soft = env->soft_interrupt;
4321 events.nmi.injected = env->nmi_injected;
4322 events.nmi.pending = env->nmi_pending;
4323 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
4325 events.sipi_vector = env->sipi_vector;
4327 if (has_msr_smbase) {
4328 events.smi.smm = !!(env->hflags & HF_SMM_MASK);
4329 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
4330 if (kvm_irqchip_in_kernel()) {
4331 /* As soon as these are moved to the kernel, remove them
4332 * from cs->interrupt_request.
4334 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
4335 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
4336 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
4337 } else {
4338 /* Keep these in cs->interrupt_request. */
4339 events.smi.pending = 0;
4340 events.smi.latched_init = 0;
4342 /* Stop SMI delivery on old machine types to avoid a reboot
4343 * on an inward migration of an old VM.
4345 if (!cpu->kvm_no_smi_migration) {
4346 events.flags |= KVM_VCPUEVENT_VALID_SMM;
4350 if (level >= KVM_PUT_RESET_STATE) {
4351 events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
4352 if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
4353 events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
4357 if (has_triple_fault_event) {
4358 events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
4359 events.triple_fault.pending = env->triple_fault_pending;
4362 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
4365 static int kvm_get_vcpu_events(X86CPU *cpu)
4367 CPUX86State *env = &cpu->env;
4368 struct kvm_vcpu_events events;
4369 int ret;
4371 if (!kvm_has_vcpu_events()) {
4372 return 0;
4375 memset(&events, 0, sizeof(events));
4376 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
4377 if (ret < 0) {
4378 return ret;
4381 if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
4382 env->exception_pending = events.exception.pending;
4383 env->exception_has_payload = events.exception_has_payload;
4384 env->exception_payload = events.exception_payload;
4385 } else {
4386 env->exception_pending = 0;
4387 env->exception_has_payload = false;
4389 env->exception_injected = events.exception.injected;
4390 env->exception_nr =
4391 (env->exception_pending || env->exception_injected) ?
4392 events.exception.nr : -1;
4393 env->has_error_code = events.exception.has_error_code;
4394 env->error_code = events.exception.error_code;
4396 env->interrupt_injected =
4397 events.interrupt.injected ? events.interrupt.nr : -1;
4398 env->soft_interrupt = events.interrupt.soft;
4400 env->nmi_injected = events.nmi.injected;
4401 env->nmi_pending = events.nmi.pending;
4402 if (events.nmi.masked) {
4403 env->hflags2 |= HF2_NMI_MASK;
4404 } else {
4405 env->hflags2 &= ~HF2_NMI_MASK;
4408 if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
4409 if (events.smi.smm) {
4410 env->hflags |= HF_SMM_MASK;
4411 } else {
4412 env->hflags &= ~HF_SMM_MASK;
4414 if (events.smi.pending) {
4415 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
4416 } else {
4417 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
4419 if (events.smi.smm_inside_nmi) {
4420 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
4421 } else {
4422 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
4424 if (events.smi.latched_init) {
4425 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
4426 } else {
4427 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
4431 if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
4432 env->triple_fault_pending = events.triple_fault.pending;
4435 env->sipi_vector = events.sipi_vector;
4437 return 0;
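/*
 * Note on the payload split (per the KVM_CAP_EXCEPTION_PAYLOAD API,
 * added here for clarity): a "pending" exception still carries its
 * side effect as a payload (CR2 for #PF, DR6 for #DB), while an
 * "injected" exception has already had the payload applied. The
 * branches above mirror exactly that distinction into env.
 */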
4440 static int kvm_guest_debug_workarounds(X86CPU *cpu)
4442 CPUState *cs = CPU(cpu);
4443 CPUX86State *env = &cpu->env;
4444 int ret = 0;
4445 unsigned long reinject_trap = 0;
4447 if (!kvm_has_vcpu_events()) {
4448 if (env->exception_nr == EXCP01_DB) {
4449 reinject_trap = KVM_GUESTDBG_INJECT_DB;
4450 } else if (env->exception_injected == EXCP03_INT3) {
4451 reinject_trap = KVM_GUESTDBG_INJECT_BP;
4453 kvm_reset_exception(env);
4457 * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
4458 * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
4459 * by updating the debug state once again if single-stepping is on.
4460 * Another reason to call kvm_update_guest_debug here is a pending debug
4461 * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
4462 * reinject them via SET_GUEST_DEBUG.
4464 if (reinject_trap ||
4465 (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
4466 ret = kvm_update_guest_debug(cs, reinject_trap);
4468 return ret;
4471 static int kvm_put_debugregs(X86CPU *cpu)
4473 CPUX86State *env = &cpu->env;
4474 struct kvm_debugregs dbgregs;
4475 int i;
4477 if (!kvm_has_debugregs()) {
4478 return 0;
4481 memset(&dbgregs, 0, sizeof(dbgregs));
4482 for (i = 0; i < 4; i++) {
4483 dbgregs.db[i] = env->dr[i];
4485 dbgregs.dr6 = env->dr[6];
4486 dbgregs.dr7 = env->dr[7];
4487 dbgregs.flags = 0;
4489 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
4492 static int kvm_get_debugregs(X86CPU *cpu)
4494 CPUX86State *env = &cpu->env;
4495 struct kvm_debugregs dbgregs;
4496 int i, ret;
4498 if (!kvm_has_debugregs()) {
4499 return 0;
4502 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
4503 if (ret < 0) {
4504 return ret;
4506 for (i = 0; i < 4; i++) {
4507 env->dr[i] = dbgregs.db[i];
4509 env->dr[4] = env->dr[6] = dbgregs.dr6;
4510 env->dr[5] = env->dr[7] = dbgregs.dr7;
4512 return 0;
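/*
 * The DR4/DR5 mirroring above matches hardware behaviour (an
 * explanatory note, no new logic): with CR4.DE clear, DR4 and DR5
 * alias DR6 and DR7, so keeping env->dr[4]/dr[5] in sync keeps the
 * aliased view consistent after a sync from KVM.
 */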
4515 static int kvm_put_nested_state(X86CPU *cpu)
4517 CPUX86State *env = &cpu->env;
4518 int max_nested_state_len = kvm_max_nested_state_length();
4520 if (!env->nested_state) {
4521 return 0;
4525 * Copy flags that are affected by reset from env->hflags and env->hflags2.
4527 if (env->hflags & HF_GUEST_MASK) {
4528 env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
4529 } else {
4530 env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
4533 /* Don't set KVM_STATE_NESTED_GIF_SET on VMX as it is illegal */
4534 if (cpu_has_svm(env) && (env->hflags2 & HF2_GIF_MASK)) {
4535 env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
4536 } else {
4537 env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
4540 assert(env->nested_state->size <= max_nested_state_len);
4541 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
4544 static int kvm_get_nested_state(X86CPU *cpu)
4546 CPUX86State *env = &cpu->env;
4547 int max_nested_state_len = kvm_max_nested_state_length();
4548 int ret;
4550 if (!env->nested_state) {
4551 return 0;
4555 * It is possible that migration restored a smaller size into
4556 * nested_state->hdr.size than what our kernel supports.
4557 * We preserve the migration origin's nested_state->hdr.size for
4558 * the call to KVM_SET_NESTED_STATE, but want our next call
4559 * to KVM_GET_NESTED_STATE to use the max size our kernel supports.
4561 env->nested_state->size = max_nested_state_len;
4563 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
4564 if (ret < 0) {
4565 return ret;
4569 * Copy flags that are affected by reset to env->hflags and env->hflags2.
4571 if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
4572 env->hflags |= HF_GUEST_MASK;
4573 } else {
4574 env->hflags &= ~HF_GUEST_MASK;
4577 /* Keep HF2_GIF_MASK set on !SVM as x86_cpu_pending_interrupt() needs it */
4578 if (cpu_has_svm(env)) {
4579 if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
4580 env->hflags2 |= HF2_GIF_MASK;
4581 } else {
4582 env->hflags2 &= ~HF2_GIF_MASK;
4586 return ret;
4589 int kvm_arch_put_registers(CPUState *cpu, int level)
4591 X86CPU *x86_cpu = X86_CPU(cpu);
4592 int ret;
4594 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
4597 * Put MSR_IA32_FEATURE_CONTROL first, this ensures the VM gets out of VMX
4598 * root operation upon vCPU reset. kvm_put_msr_feature_control() should also
4599 * precede kvm_put_nested_state() when 'real' nested state is set.
4601 if (level >= KVM_PUT_RESET_STATE) {
4602 ret = kvm_put_msr_feature_control(x86_cpu);
4603 if (ret < 0) {
4604 return ret;
4608 /* must be before kvm_put_nested_state so that EFER.SVME is set */
4609 ret = has_sregs2 ? kvm_put_sregs2(x86_cpu) : kvm_put_sregs(x86_cpu);
4610 if (ret < 0) {
4611 return ret;
4614 if (level >= KVM_PUT_RESET_STATE) {
4615 ret = kvm_put_nested_state(x86_cpu);
4616 if (ret < 0) {
4617 return ret;
4621 if (level == KVM_PUT_FULL_STATE) {
4622 /* We don't check for kvm_arch_set_tsc_khz() errors here,
4623 * because TSC frequency mismatch shouldn't abort migration,
4624 * unless the user explicitly asked for a more strict TSC
4625 * setting (e.g. using an explicit "tsc-freq" option).
4627 kvm_arch_set_tsc_khz(cpu);
4630 ret = kvm_getput_regs(x86_cpu, 1);
4631 if (ret < 0) {
4632 return ret;
4634 ret = kvm_put_xsave(x86_cpu);
4635 if (ret < 0) {
4636 return ret;
4638 ret = kvm_put_xcrs(x86_cpu);
4639 if (ret < 0) {
4640 return ret;
4642 /* must be before kvm_put_msrs */
4643 ret = kvm_inject_mce_oldstyle(x86_cpu);
4644 if (ret < 0) {
4645 return ret;
4647 ret = kvm_put_msrs(x86_cpu, level);
4648 if (ret < 0) {
4649 return ret;
4651 ret = kvm_put_vcpu_events(x86_cpu, level);
4652 if (ret < 0) {
4653 return ret;
4655 if (level >= KVM_PUT_RESET_STATE) {
4656 ret = kvm_put_mp_state(x86_cpu);
4657 if (ret < 0) {
4658 return ret;
4662 ret = kvm_put_tscdeadline_msr(x86_cpu);
4663 if (ret < 0) {
4664 return ret;
4666 ret = kvm_put_debugregs(x86_cpu);
4667 if (ret < 0) {
4668 return ret;
4670 /* must be last */
4671 ret = kvm_guest_debug_workarounds(x86_cpu);
4672 if (ret < 0) {
4673 return ret;
4675 return 0;
4678 int kvm_arch_get_registers(CPUState *cs)
4680 X86CPU *cpu = X86_CPU(cs);
4681 int ret;
4683 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
4685 ret = kvm_get_vcpu_events(cpu);
4686 if (ret < 0) {
4687 goto out;
4690 * KVM_GET_MPSTATE can modify CS and RIP; call it before
4691 * KVM_GET_REGS and KVM_GET_SREGS.
4693 ret = kvm_get_mp_state(cpu);
4694 if (ret < 0) {
4695 goto out;
4697 ret = kvm_getput_regs(cpu, 0);
4698 if (ret < 0) {
4699 goto out;
4701 ret = kvm_get_xsave(cpu);
4702 if (ret < 0) {
4703 goto out;
4705 ret = kvm_get_xcrs(cpu);
4706 if (ret < 0) {
4707 goto out;
4709 ret = has_sregs2 ? kvm_get_sregs2(cpu) : kvm_get_sregs(cpu);
4710 if (ret < 0) {
4711 goto out;
4713 ret = kvm_get_msrs(cpu);
4714 if (ret < 0) {
4715 goto out;
4717 ret = kvm_get_apic(cpu);
4718 if (ret < 0) {
4719 goto out;
4721 ret = kvm_get_debugregs(cpu);
4722 if (ret < 0) {
4723 goto out;
4725 ret = kvm_get_nested_state(cpu);
4726 if (ret < 0) {
4727 goto out;
4729 ret = 0;
4730 out:
4731 cpu_sync_bndcs_hflags(&cpu->env);
4732 return ret;
4735 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
4737 X86CPU *x86_cpu = X86_CPU(cpu);
4738 CPUX86State *env = &x86_cpu->env;
4739 int ret;
4741 /* Inject NMI */
4742 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
4743 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
4744 qemu_mutex_lock_iothread();
4745 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
4746 qemu_mutex_unlock_iothread();
4747 DPRINTF("injected NMI\n");
4748 ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
4749 if (ret < 0) {
4750 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
4751 strerror(-ret));
4754 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
4755 qemu_mutex_lock_iothread();
4756 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
4757 qemu_mutex_unlock_iothread();
4758 DPRINTF("injected SMI\n");
4759 ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
4760 if (ret < 0) {
4761 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
4762 strerror(-ret));
4767 if (!kvm_pic_in_kernel()) {
4768 qemu_mutex_lock_iothread();
4771 /* Force the VCPU out of its inner loop to process any INIT requests
4772 * or (for userspace APIC, but it is cheap to combine the checks here)
4773 * pending TPR access reports.
4775 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
4776 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
4777 !(env->hflags & HF_SMM_MASK)) {
4778 cpu->exit_request = 1;
4780 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
4781 cpu->exit_request = 1;
4785 if (!kvm_pic_in_kernel()) {
4786 /* Try to inject an interrupt if the guest can accept it */
4787 if (run->ready_for_interrupt_injection &&
4788 (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
4789 (env->eflags & IF_MASK)) {
4790 int irq;
4792 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
4793 irq = cpu_get_pic_interrupt(env);
4794 if (irq >= 0) {
4795 struct kvm_interrupt intr;
4797 intr.irq = irq;
4798 DPRINTF("injected interrupt %d\n", irq);
4799 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
4800 if (ret < 0) {
4801 fprintf(stderr,
4802 "KVM: injection failed, interrupt lost (%s)\n",
4803 strerror(-ret));
4808 /* If we have an interrupt but the guest is not ready to receive an
4809 * interrupt, request an interrupt window exit. This will
4810 * cause a return to userspace as soon as the guest is ready to
4811 * receive interrupts. */
4812 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
4813 run->request_interrupt_window = 1;
4814 } else {
4815 run->request_interrupt_window = 0;
4818 DPRINTF("setting tpr\n");
4819 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
4821 qemu_mutex_unlock_iothread();
4825 static void kvm_rate_limit_on_bus_lock(void)
4827 uint64_t delay_ns = ratelimit_calculate_delay(&bus_lock_ratelimit_ctrl, 1);
4829 if (delay_ns) {
4830 g_usleep(delay_ns / SCALE_US);
4834 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
4836 X86CPU *x86_cpu = X86_CPU(cpu);
4837 CPUX86State *env = &x86_cpu->env;
4839 if (run->flags & KVM_RUN_X86_SMM) {
4840 env->hflags |= HF_SMM_MASK;
4841 } else {
4842 env->hflags &= ~HF_SMM_MASK;
4844 if (run->if_flag) {
4845 env->eflags |= IF_MASK;
4846 } else {
4847 env->eflags &= ~IF_MASK;
4849 if (run->flags & KVM_RUN_X86_BUS_LOCK) {
4850 kvm_rate_limit_on_bus_lock();
4853 /* We need to protect the apic state against concurrent accesses from
4854 * different threads in case the userspace irqchip is used. */
4855 if (!kvm_irqchip_in_kernel()) {
4856 qemu_mutex_lock_iothread();
4858 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
4859 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
4860 if (!kvm_irqchip_in_kernel()) {
4861 qemu_mutex_unlock_iothread();
4863 return cpu_get_mem_attrs(env);
4866 int kvm_arch_process_async_events(CPUState *cs)
4868 X86CPU *cpu = X86_CPU(cs);
4869 CPUX86State *env = &cpu->env;
4871 if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
4872 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
4873 assert(env->mcg_cap);
4875 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
4877 kvm_cpu_synchronize_state(cs);
4879 if (env->exception_nr == EXCP08_DBLE) {
4880 /* this means triple fault */
4881 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
4882 cs->exit_request = 1;
4883 return 0;
4885 kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
4886 env->has_error_code = 0;
4888 cs->halted = 0;
4889 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
4890 env->mp_state = KVM_MP_STATE_RUNNABLE;
4894 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
4895 !(env->hflags & HF_SMM_MASK)) {
4896 kvm_cpu_synchronize_state(cs);
4897 do_cpu_init(cpu);
4900 if (kvm_irqchip_in_kernel()) {
4901 return 0;
4904 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
4905 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
4906 apic_poll_irq(cpu->apic_state);
4908 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4909 (env->eflags & IF_MASK)) ||
4910 (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4911 cs->halted = 0;
4913 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
4914 kvm_cpu_synchronize_state(cs);
4915 do_cpu_sipi(cpu);
4917 if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
4918 cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
4919 kvm_cpu_synchronize_state(cs);
4920 apic_handle_tpr_access_report(cpu->apic_state, env->eip,
4921 env->tpr_access_type);
4924 return cs->halted;
4927 static int kvm_handle_halt(X86CPU *cpu)
4929 CPUState *cs = CPU(cpu);
4930 CPUX86State *env = &cpu->env;
4932 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4933 (env->eflags & IF_MASK)) &&
4934 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4935 cs->halted = 1;
4936 return EXCP_HLT;
4939 return 0;
4942 static int kvm_handle_tpr_access(X86CPU *cpu)
4944 CPUState *cs = CPU(cpu);
4945 struct kvm_run *run = cs->kvm_run;
4947 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
4948 run->tpr_access.is_write ? TPR_ACCESS_WRITE
4949 : TPR_ACCESS_READ);
4950 return 1;
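/*
 * Software breakpoints below use classic INT3 patching (a summary
 * comment, no behavioural change): 0xcc is the one-byte INT3 opcode;
 * insertion saves the guest's original byte into bp->saved_insn and
 * writes 0xcc over it, removal verifies 0xcc is still in place before
 * restoring the saved byte.
 */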
4953 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4955 static const uint8_t int3 = 0xcc;
4957 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
4958 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
4959 return -EINVAL;
4961 return 0;
4964 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4966 uint8_t int3;
4968 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
4969 return -EINVAL;
4971 if (int3 != 0xcc) {
4972 return 0;
4974 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
4975 return -EINVAL;
4977 return 0;
4980 static struct {
4981 target_ulong addr;
4982 int len;
4983 int type;
4984 } hw_breakpoint[4];
4986 static int nb_hw_breakpoint;
4988 static int find_hw_breakpoint(target_ulong addr, int len, int type)
4990 int n;
4992 for (n = 0; n < nb_hw_breakpoint; n++) {
4993 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
4994 (hw_breakpoint[n].len == len || len == -1)) {
4995 return n;
4998 return -1;
5001 int kvm_arch_insert_hw_breakpoint(target_ulong addr,
5002 target_ulong len, int type)
5004 switch (type) {
5005 case GDB_BREAKPOINT_HW:
5006 len = 1;
5007 break;
5008 case GDB_WATCHPOINT_WRITE:
5009 case GDB_WATCHPOINT_ACCESS:
5010 switch (len) {
5011 case 1:
5012 break;
5013 case 2:
5014 case 4:
5015 case 8:
5016 if (addr & (len - 1)) {
5017 return -EINVAL;
5019 break;
5020 default:
5021 return -EINVAL;
5023 break;
5024 default:
5025 return -ENOSYS;
5028 if (nb_hw_breakpoint == 4) {
5029 return -ENOBUFS;
5031 if (find_hw_breakpoint(addr, len, type) >= 0) {
5032 return -EEXIST;
5034 hw_breakpoint[nb_hw_breakpoint].addr = addr;
5035 hw_breakpoint[nb_hw_breakpoint].len = len;
5036 hw_breakpoint[nb_hw_breakpoint].type = type;
5037 nb_hw_breakpoint++;
5039 return 0;
5042 int kvm_arch_remove_hw_breakpoint(target_ulong addr,
5043 target_ulong len, int type)
5045 int n;
5047 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
5048 if (n < 0) {
5049 return -ENOENT;
5051 nb_hw_breakpoint--;
5052 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
5054 return 0;
5057 void kvm_arch_remove_all_hw_breakpoints(void)
5059 nb_hw_breakpoint = 0;
5062 static CPUWatchpoint hw_watchpoint;
5064 static int kvm_handle_debug(X86CPU *cpu,
5065 struct kvm_debug_exit_arch *arch_info)
5067 CPUState *cs = CPU(cpu);
5068 CPUX86State *env = &cpu->env;
5069 int ret = 0;
5070 int n;
5072 if (arch_info->exception == EXCP01_DB) {
5073 if (arch_info->dr6 & DR6_BS) {
5074 if (cs->singlestep_enabled) {
5075 ret = EXCP_DEBUG;
5077 } else {
5078 for (n = 0; n < 4; n++) {
5079 if (arch_info->dr6 & (1 << n)) {
5080 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
5081 case 0x0:
5082 ret = EXCP_DEBUG;
5083 break;
5084 case 0x1:
5085 ret = EXCP_DEBUG;
5086 cs->watchpoint_hit = &hw_watchpoint;
5087 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
5088 hw_watchpoint.flags = BP_MEM_WRITE;
5089 break;
5090 case 0x3:
5091 ret = EXCP_DEBUG;
5092 cs->watchpoint_hit = &hw_watchpoint;
5093 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
5094 hw_watchpoint.flags = BP_MEM_ACCESS;
5095 break;
5100 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
5101 ret = EXCP_DEBUG;
5103 if (ret == 0) {
5104 cpu_synchronize_state(cs);
5105 assert(env->exception_nr == -1);
5107 /* pass to guest */
5108 kvm_queue_exception(env, arch_info->exception,
5109 arch_info->exception == EXCP01_DB,
5110 arch_info->dr6);
5111 env->has_error_code = 0;
5114 return ret;
5117 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
5119 const uint8_t type_code[] = {
5120 [GDB_BREAKPOINT_HW] = 0x0,
5121 [GDB_WATCHPOINT_WRITE] = 0x1,
5122 [GDB_WATCHPOINT_ACCESS] = 0x3
5124 const uint8_t len_code[] = {
5125 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
5127 int n;
5129 if (kvm_sw_breakpoints_active(cpu)) {
5130 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
5132 if (nb_hw_breakpoint > 0) {
5133 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
5134 dbg->arch.debugreg[7] = 0x0600;
5135 for (n = 0; n < nb_hw_breakpoint; n++) {
5136 dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
5137 dbg->arch.debugreg[7] |= (2 << (n * 2)) |
5138 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
5139 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
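/*
 * Worked DR7 example (illustrative): a single 4-byte write watchpoint
 * in slot 0 yields
 *
 *     dbg->arch.debugreg[7] = 0x0600         // GE + always-one bit 10
 *                           | (2 << 0)       // G0: global enable, slot 0
 *                           | (0x1 << 16)    // type: data write
 *                           | (0x3 << 18);   // length: 4 bytes
 *                           // == 0x000d0602
 */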
5144 static bool kvm_install_msr_filters(KVMState *s)
5146 uint64_t zero = 0;
5147 struct kvm_msr_filter filter = {
5148 .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
5150 int r, i, j = 0;
5152 for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) {
5153 KVMMSRHandlers *handler = &msr_handlers[i];
5154 if (handler->msr) {
5155 struct kvm_msr_filter_range *range = &filter.ranges[j++];
5157 *range = (struct kvm_msr_filter_range) {
5158 .flags = 0,
5159 .nmsrs = 1,
5160 .base = handler->msr,
5161 .bitmap = (__u8 *)&zero,
5164 if (handler->rdmsr) {
5165 range->flags |= KVM_MSR_FILTER_READ;
5168 if (handler->wrmsr) {
5169 range->flags |= KVM_MSR_FILTER_WRITE;
5174 r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
5175 if (r) {
5176 return false;
5179 return true;
5182 bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
5183 QEMUWRMSRHandler *wrmsr)
5185 int i;
5187 for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5188 if (!msr_handlers[i].msr) {
5189 msr_handlers[i] = (KVMMSRHandlers) {
5190 .msr = msr,
5191 .rdmsr = rdmsr,
5192 .wrmsr = wrmsr,
5195 if (!kvm_install_msr_filters(s)) {
5196 msr_handlers[i] = (KVMMSRHandlers) { };
5197 return false;
5200 return true;
5204 return false;
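/*
 * Illustrative usage (hypothetical handler and MSR names, not from
 * this file): a userspace MSR emulation registers itself like
 *
 *     static bool my_rdmsr(X86CPU *cpu, uint32_t msr, uint64_t *val)
 *     {
 *         *val = 42;       // computed value returned to the guest
 *         return true;     // returning false injects #GP instead
 *     }
 *     ...
 *     kvm_filter_msr(s, MSR_FOO, my_rdmsr, NULL);
 *
 * Passing NULL for one direction leaves that direction unfiltered, so
 * KVM handles it as usual.
 */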
5207 static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
5209 int i;
5210 bool r;
5212 for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5213 KVMMSRHandlers *handler = &msr_handlers[i];
5214 if (run->msr.index == handler->msr) {
5215 if (handler->rdmsr) {
5216 r = handler->rdmsr(cpu, handler->msr,
5217 (uint64_t *)&run->msr.data);
5218 run->msr.error = r ? 0 : 1;
5219 return 0;
5224 assert(false);
5227 static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
5229 int i;
5230 bool r;
5232 for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
5233 KVMMSRHandlers *handler = &msr_handlers[i];
5234 if (run->msr.index == handler->msr) {
5235 if (handler->wrmsr) {
5236 r = handler->wrmsr(cpu, handler->msr, run->msr.data);
5237 run->msr.error = r ? 0 : 1;
5238 return 0;
5243 assert(false);
5246 static bool has_sgx_provisioning;
5248 static bool __kvm_enable_sgx_provisioning(KVMState *s)
5250 int fd, ret;
5252 if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) {
5253 return false;
5256 fd = qemu_open_old("/dev/sgx_provision", O_RDONLY);
5257 if (fd < 0) {
5258 return false;
5261 ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd);
5262 if (ret) {
5263 error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret));
5264 exit(1);
5266 close(fd);
5267 return true;
5270 bool kvm_enable_sgx_provisioning(KVMState *s)
5272 return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning);
5275 static bool host_supports_vmx(void)
5277 uint32_t ecx, unused;
5279 host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
5280 return ecx & CPUID_EXT_VMX;
5283 #define VMX_INVALID_GUEST_STATE 0x80000021
5285 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
5287 X86CPU *cpu = X86_CPU(cs);
5288 uint64_t code;
5289 int ret;
5290 bool ctx_invalid;
5291 char str[256];
5292 KVMState *state;
5294 switch (run->exit_reason) {
5295 case KVM_EXIT_HLT:
5296 DPRINTF("handle_hlt\n");
5297 qemu_mutex_lock_iothread();
5298 ret = kvm_handle_halt(cpu);
5299 qemu_mutex_unlock_iothread();
5300 break;
5301 case KVM_EXIT_SET_TPR:
5302 ret = 0;
5303 break;
5304 case KVM_EXIT_TPR_ACCESS:
5305 qemu_mutex_lock_iothread();
5306 ret = kvm_handle_tpr_access(cpu);
5307 qemu_mutex_unlock_iothread();
5308 break;
5309 case KVM_EXIT_FAIL_ENTRY:
5310 code = run->fail_entry.hardware_entry_failure_reason;
5311 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
5312 code);
5313 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
5314 fprintf(stderr,
5315 "\nIf you're running a guest on an Intel machine without "
5316 "unrestricted mode\n"
5317 "support, the failure can be most likely due to the guest "
5318 "entering an invalid\n"
5319 "state for Intel VT. For example, the guest maybe running "
5320 "in big real mode\n"
5321 "which is not supported on less recent Intel processors."
5322 "\n\n");
5324 ret = -1;
5325 break;
5326 case KVM_EXIT_EXCEPTION:
5327 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
5328 run->ex.exception, run->ex.error_code);
5329 ret = -1;
5330 break;
5331 case KVM_EXIT_DEBUG:
5332 DPRINTF("kvm_exit_debug\n");
5333 qemu_mutex_lock_iothread();
5334 ret = kvm_handle_debug(cpu, &run->debug.arch);
5335 qemu_mutex_unlock_iothread();
5336 break;
5337 case KVM_EXIT_HYPERV:
5338 ret = kvm_hv_handle_exit(cpu, &run->hyperv);
5339 break;
5340 case KVM_EXIT_IOAPIC_EOI:
5341 ioapic_eoi_broadcast(run->eoi.vector);
5342 ret = 0;
5343 break;
5344 case KVM_EXIT_X86_BUS_LOCK:
5345 /* already handled in kvm_arch_post_run */
5346 ret = 0;
5347 break;
5348 case KVM_EXIT_NOTIFY:
5349 ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID);
5350 state = KVM_STATE(current_accel());
5351 sprintf(str, "Encountered a notify exit with %svalid context in"
5352 " guest. The guest may be misbehaving."
5353 " Please have a look.", ctx_invalid ? "in" : "");
5354 if (ctx_invalid ||
5355 state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) {
5356 warn_report("KVM internal error: %s", str);
5357 ret = -1;
5358 } else {
5359 warn_report_once("KVM: %s", str);
5360 ret = 0;
5362 break;
5363 case KVM_EXIT_X86_RDMSR:
5364 /* We only enable MSR filtering, any other exit is bogus */
5365 assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
5366 ret = kvm_handle_rdmsr(cpu, run);
5367 break;
5368 case KVM_EXIT_X86_WRMSR:
5369 /* We only enable MSR filtering, any other exit is bogus */
5370 assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
5371 ret = kvm_handle_wrmsr(cpu, run);
5372 break;
5373 default:
5374 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
5375 ret = -1;
5376 break;
5379 return ret;
5382 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
5384 X86CPU *cpu = X86_CPU(cs);
5385 CPUX86State *env = &cpu->env;
5387 kvm_cpu_synchronize_state(cs);
5388 return !(env->cr[0] & CR0_PE_MASK) ||
5389 ((env->segs[R_CS].selector & 3) != 3);
5392 void kvm_arch_init_irq_routing(KVMState *s)
5394 /* We know at this point that we're using the in-kernel
5395 * irqchip, so we can use irqfds, and on x86 we know
5396 * we can use msi via irqfd and GSI routing.
5398 kvm_msi_via_irqfd_allowed = true;
5399 kvm_gsi_routing_allowed = true;
5401 if (kvm_irqchip_is_split()) {
5402 KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
5403 int i;
5405 /* If the ioapic is in QEMU and the lapics are in KVM, reserve
5406 MSI routes for signaling interrupts to the local apics. */
5407 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
5408 if (kvm_irqchip_add_msi_route(&c, 0, NULL) < 0) {
5409 error_report("Could not enable split IRQ mode.");
5410 exit(1);
5413 kvm_irqchip_commit_route_changes(&c);
5417 int kvm_arch_irqchip_create(KVMState *s)
5419 int ret;
5420 if (kvm_kernel_irqchip_split()) {
5421 ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
5422 if (ret) {
5423 error_report("Could not enable split irqchip mode: %s",
5424 strerror(-ret));
5425 exit(1);
5426 } else {
5427 DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
5428 kvm_split_irqchip = true;
5429 return 1;
5431 } else {
5432 return 0;
5436 uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address)
5438 CPUX86State *env;
5439 uint64_t ext_id;
5441 if (!first_cpu) {
5442 return address;
5444 env = &X86_CPU(first_cpu)->env;
5445 if (!(env->features[FEAT_KVM] & (1 << KVM_FEATURE_MSI_EXT_DEST_ID))) {
5446 return address;
5450 * If the remappable format bit is set, or the upper bits are
5451 * already set in address_hi, or the low extended bits aren't
5452 * there anyway, do nothing.
5454 ext_id = address & (0xff << MSI_ADDR_DEST_IDX_SHIFT);
5455 if (!ext_id || (ext_id & (1 << MSI_ADDR_DEST_IDX_SHIFT)) || (address >> 32)) {
5456 return address;
5459 address &= ~ext_id;
5460 address |= ext_id << 35;
5461 return address;
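/*
 * Worked example (illustrative): for guest APIC ID 0x123, the low byte
 * 0x23 sits in the classic destination field (bits 19..12) and bits
 * 14..8 (here 0x1) arrive in address bits 11..5, so address ==
 * 0xfee23020. The swizzle clears the low copy and shifts it up by 35,
 * giving 0x100fee23000: address_hi == 0x100 carries destination bit 8.
 */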
5464 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
5465 uint64_t address, uint32_t data, PCIDevice *dev)
5467 X86IOMMUState *iommu = x86_iommu_get_default();
5469 if (iommu) {
5470 X86IOMMUClass *class = X86_IOMMU_DEVICE_GET_CLASS(iommu);
5472 if (class->int_remap) {
5473 int ret;
5474 MSIMessage src, dst;
5476 src.address = route->u.msi.address_hi;
5477 src.address <<= VTD_MSI_ADDR_HI_SHIFT;
5478 src.address |= route->u.msi.address_lo;
5479 src.data = route->u.msi.data;
5481 ret = class->int_remap(iommu, &src, &dst, dev ? \
5482 pci_requester_id(dev) : \
5483 X86_IOMMU_SID_INVALID);
5484 if (ret) {
5485 trace_kvm_x86_fixup_msi_error(route->gsi);
5486 return 1;
5490 * Handle an untranslated compatibility format interrupt with
5491 * extended destination ID in the low bits 11-5. */
5492 dst.address = kvm_swizzle_msi_ext_dest_id(dst.address);
5494 route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
5495 route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
5496 route->u.msi.data = dst.data;
5497 return 0;
5501 address = kvm_swizzle_msi_ext_dest_id(address);
5502 route->u.msi.address_hi = address >> VTD_MSI_ADDR_HI_SHIFT;
5503 route->u.msi.address_lo = address & VTD_MSI_ADDR_LO_MASK;
5504 return 0;
5507 typedef struct MSIRouteEntry MSIRouteEntry;
5509 struct MSIRouteEntry {
5510 PCIDevice *dev; /* Device pointer */
5511 int vector; /* MSI/MSIX vector index */
5512 int virq; /* Virtual IRQ index */
5513 QLIST_ENTRY(MSIRouteEntry) list;
5516 /* List of used GSI routes */
5517 static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
5518 QLIST_HEAD_INITIALIZER(msi_route_list);
5520 static void kvm_update_msi_routes_all(void *private, bool global,
5521 uint32_t index, uint32_t mask)
5523 int cnt = 0, vector;
5524 MSIRouteEntry *entry;
5525 MSIMessage msg;
5526 PCIDevice *dev;
5528 /* TODO: explicit route update */
5529 QLIST_FOREACH(entry, &msi_route_list, list) {
5530 cnt++;
5531 vector = entry->vector;
5532 dev = entry->dev;
5533 if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
5534 msg = msix_get_message(dev, vector);
5535 } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
5536 msg = msi_get_message(dev, vector);
5537 } else {
5539 * Either MSI/MSIX is disabled for the device, or the
5540 * specific message was masked out. Skip this one.
5542 continue;
5544 kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
5546 kvm_irqchip_commit_routes(kvm_state);
5547 trace_kvm_x86_update_msi_routes(cnt);
5550 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
5551 int vector, PCIDevice *dev)
5553 static bool notify_list_inited = false;
5554 MSIRouteEntry *entry;
5556 if (!dev) {
5557 /* These are (possibly) IOAPIC routes only used for split
5558 * kernel irqchip mode, while we are only housekeeping
5559 * PCI devices here. */
5560 return 0;
5563 entry = g_new0(MSIRouteEntry, 1);
5564 entry->dev = dev;
5565 entry->vector = vector;
5566 entry->virq = route->gsi;
5567 QLIST_INSERT_HEAD(&msi_route_list, entry, list);
5569 trace_kvm_x86_add_msi_route(route->gsi);
5571 if (!notify_list_inited) {
5572 /* The first time we add a route, register ourselves in the
5573 * IOMMU's IEC notifier list if needed. */
5574 X86IOMMUState *iommu = x86_iommu_get_default();
5575 if (iommu) {
5576 x86_iommu_iec_register_notifier(iommu,
5577 kvm_update_msi_routes_all,
5578 NULL);
5580 notify_list_inited = true;
5582 return 0;
5585 int kvm_arch_release_virq_post(int virq)
5587 MSIRouteEntry *entry, *next;
5588 QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
5589 if (entry->virq == virq) {
5590 trace_kvm_x86_remove_msi_route(virq);
5591 QLIST_REMOVE(entry, list);
5592 g_free(entry);
5593 break;
5596 return 0;
5599 int kvm_arch_msi_data_to_gsi(uint32_t data)
5601 abort();
5604 bool kvm_has_waitpkg(void)
5606 return has_msr_umwait;
5609 bool kvm_arch_cpu_check_are_resettable(void)
5611 return !sev_es_enabled();
5614 #define ARCH_REQ_XCOMP_GUEST_PERM 0x1025
5616 void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask)
5618 KVMState *s = kvm_state;
5619 uint64_t supported;
5621 mask &= XSTATE_DYNAMIC_MASK;
5622 if (!mask) {
5623 return;
5626 * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0].
5627 * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned
5628 * about them already because they are not supported features.
5630 supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
5631 supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32;
5632 mask &= supported;
5634 while (mask) {
5635 int bit = ctz64(mask);
5636 int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
5637 if (rc) {
5639 * Older kernel versions (< 5.17) do not support
5640 * ARCH_REQ_XCOMP_GUEST_PERM, but also do not report
5641 * any dynamic features from kvm_arch_get_supported_cpuid.
5643 warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure "
5644 "for feature bit %d", bit);
5646 mask &= ~BIT_ULL(bit);
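/*
 * Usage note (hedged): at the time of writing the only dynamic
 * xfeature is AMX tile data (XSTATE bit 18), so a caller effectively
 * does something like
 *
 *     kvm_request_xsave_components(cpu, XSTATE_XTILE_DATA_MASK);
 *
 * before creating vCPUs, prompting the kernel to permit the large
 * guest FPU state.
 */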
5650 static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp)
5652 KVMState *s = KVM_STATE(obj);
5653 return s->notify_vmexit;
5656 static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp)
5658 KVMState *s = KVM_STATE(obj);
5660 if (s->fd != -1) {
5661 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
5662 return;
5665 s->notify_vmexit = value;
5668 static void kvm_arch_get_notify_window(Object *obj, Visitor *v,
5669 const char *name, void *opaque,
5670 Error **errp)
5672 KVMState *s = KVM_STATE(obj);
5673 uint32_t value = s->notify_window;
5675 visit_type_uint32(v, name, &value, errp);
5678 static void kvm_arch_set_notify_window(Object *obj, Visitor *v,
5679 const char *name, void *opaque,
5680 Error **errp)
5682 KVMState *s = KVM_STATE(obj);
5683 Error *error = NULL;
5684 uint32_t value;
5686 if (s->fd != -1) {
5687 error_setg(errp, "Cannot set properties after the accelerator has been initialized");
5688 return;
5691 visit_type_uint32(v, name, &value, &error);
5692 if (error) {
5693 error_propagate(errp, error);
5694 return;
5697 s->notify_window = value;
5700 void kvm_arch_accel_class_init(ObjectClass *oc)
5702 object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption",
5703 &NotifyVmexitOption_lookup,
5704 kvm_arch_get_notify_vmexit,
5705 kvm_arch_set_notify_vmexit);
5706 object_class_property_set_description(oc, "notify-vmexit",
5707 "Enable notify VM exit");
5709 object_class_property_add(oc, "notify-window", "uint32",
5710 kvm_arch_get_notify_window,
5711 kvm_arch_set_notify_window,
5712 NULL, NULL);
5713 object_class_property_set_description(oc, "notify-window",
5714 "Clock cycles without an event window "
5715 "after which a notification VM exit occurs");