/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "hyperv.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/intel_iommu.h"
#include "hw/i386/x86-iommu.h"

#include "exec/ioport.h"
#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "migration/blocker.h"
#include "exec/memattrs.h"
#include "trace.h"
//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
 * 255 kvm_msr_entry structs */
#define MSR_BUF_SIZE 4096
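
/* Sizing sketch (illustrative, added for clarity): struct kvm_msrs is an
 * 8-byte header (__u32 nmsrs plus __u32 pad) and each struct kvm_msr_entry
 * is 16 bytes (__u32 index, __u32 reserved, __u64 data), so the header plus
 * 255 entries come to 8 + 255 * 16 = 4088 bytes, comfortably inside the
 * 4096-byte buffer.
 */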

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};
static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_xss;

static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;

static bool has_msr_mcg_ext_ctl;

static struct kvm_cpuid2 *cpuid_cache;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_has_adjust_clock_stable(void)
{
    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);

    return (ret == KVM_CLOCK_TSC_STABLE);
}

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

static bool kvm_x2apic_api_set_flags(uint64_t flags)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
}

#define MEMORIZE(fn, _result) \
    ({ \
        static bool _memorized; \
        \
        if (_memorized) { \
            return _result; \
        } \
        _memorized = true; \
        _result = fn; \
    })

static bool has_x2apic_api;

bool kvm_has_x2apic_api(void)
{
    return has_x2apic_api;
}

bool kvm_enable_x2apic(void)
{
    return MEMORIZE(
        kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
                                 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
        has_x2apic_api);
}
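
/* Expansion sketch (added for clarity, not in the original file): inside
 * kvm_enable_x2apic() the MEMORIZE() macro expands roughly to
 *
 *     static bool _memorized;
 *     if (_memorized) {
 *         return has_x2apic_api;    // early-return from the caller itself
 *     }
 *     _memorized = true;
 *     has_x2apic_api = kvm_x2apic_api_set_flags(...);
 *
 * so the KVM_CAP_X2APIC_API ioctl is issued at most once per process and
 * every later call returns the cached result.
 */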

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    env->tsc = msr_data.entries[0].data;
    return 0;
}
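
/* Layout note (added for clarity): KVM_GET_MSRS expects a struct kvm_msrs
 * header immediately followed by nmsrs entry slots, so the anonymous struct
 * above emulates that variable-length layout with a fixed one-entry array.
 * On success the ioctl returns the number of entries it read, hence the
 * assert(ret == 1) above.
 */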

static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    if (cpuid_cache != NULL) {
        return cpuid_cache;
    }
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    cpuid_cache = cpuid;
    return cpuid;
}
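
/* Protocol sketch (added for clarity): KVM_GET_SUPPORTED_CPUID fails with
 * E2BIG whenever the supplied buffer is too small, so the loop above retries
 * with max = 1, 2, 4, ... until the kernel's whole table fits, and the
 * result is cached in cpuid_cache for the lifetime of the process.
 */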

static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}

static bool host_tsx_blacklisted(void)
{
    int family, model, stepping;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_vendor_fms(vendor, &family, &model, &stepping);

    /* Check if we are running on a Haswell host known to have broken TSX */
    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
           (family == 6) &&
           ((model == 63 && stepping < 4) ||
            model == 60 || model == 69 || model == 70);
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 7 && index == 0 && reg == R_EBX) {
        if (host_tsx_blacklisted()) {
            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
        }
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
         * be enabled without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
        }
    }

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}
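
/* Usage sketch (illustrative, hypothetical caller): asking whether the
 * host/KVM combination can expose x2APIC to the guest:
 *
 *     uint32_t ecx = kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
 *     bool have_x2apic = ecx & CPUID_EXT_X2APIC;
 *
 * Thanks to the fixups above, the answer already accounts for the irqchip
 * mode and for known-broken host features such as Haswell TSX.
 */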

typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;
    int flags = 0;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }

    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
    /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
     * guest kernel back into env->mcg_ext_ctl.
     */
    cpu_synchronize_state(cs);
    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
        mcg_status |= MCG_STATUS_LMCE;
        flags = 0;
    }

    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    /* If we get an action required MCE, it has been injected by KVM
     * while the VM was running. An action optional MCE instead should
     * be coming from the main thread, which qemu_init_sigbus identifies
     * as the "early kill" thread.
     */
    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if ((env->mcg_cap & MCG_SER_P) && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            kvm_mce_inject(cpu, paddr, code);
            return;
        }

        fprintf(stderr, "Hardware memory error for memory used by "
                "QEMU itself instead of guest system!\n");
    }

    if (code == BUS_MCEERR_AR) {
        hardware_memory_error();
    }

    /* Hope we are lucky for AO MCE */
}

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif

static bool hyperv_hypercall_available(X86CPU *cpu)
{
    return cpu->hyperv_vapic ||
           (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
}

static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
           (hyperv_hypercall_available(cpu) ||
            cpu->hyperv_time ||
            cpu->hyperv_relaxed_timing ||
            cpu->hyperv_crash ||
            cpu->hyperv_reset ||
            cpu->hyperv_vpindex ||
            cpu->hyperv_runtime ||
            cpu->hyperv_synic ||
            cpu->hyperv_stimer);
}

static int kvm_arch_set_tsc_khz(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int r;

    if (!env->tsc_khz) {
        return 0;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
        -ENOTSUP;
    if (r < 0) {
        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
         * TSC frequency doesn't match the one we want.
         */
        int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
                       kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
                       -ENOTSUP;
        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
            error_report("warning: TSC frequency mismatch between "
                         "VM (%" PRId64 " kHz) and host (%d kHz), "
                         "and TSC scaling unavailable",
                         env->tsc_khz, cur_freq);
            return r;
        }
    }

    return 0;
}
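
/* Behavior note (summarizing the logic above, not in the original file):
 * a failed KVM_SET_TSC_KHZ is tolerated as long as KVM_GET_TSC_KHZ shows
 * the host already runs at the requested frequency; only a genuine
 * mismatch without TSC scaling support is reported back as an error.
 */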

static int hyperv_handle_properties(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cpu->hyperv_time &&
        kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
        cpu->hyperv_time = false;
    }

    if (cpu->hyperv_relaxed_timing) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
    }
    if (cpu->hyperv_vapic) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
    }
    if (cpu->hyperv_time) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
        env->features[FEAT_HYPERV_EAX] |= 0x200;
    }
    if (cpu->hyperv_crash && has_msr_hv_crash) {
        env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
    }
    env->features[FEAT_HYPERV_EDX] |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
    if (cpu->hyperv_reset && has_msr_hv_reset) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_RESET_AVAILABLE;
    }
    if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_INDEX_AVAILABLE;
    }
    if (cpu->hyperv_runtime && has_msr_hv_runtime) {
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
    }
    if (cpu->hyperv_synic) {
        int sint;

        if (!has_msr_hv_synic ||
            kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
            fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
            return -ENOSYS;
        }

        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNIC_AVAILABLE;
        env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
        for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
            env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
        }
    }
    if (cpu->hyperv_stimer) {
        if (!has_msr_hv_stimer) {
            fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
            return -ENOSYS;
        }
        env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNTIMER_AVAILABLE;
    }
    return 0;
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES 100

int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } QEMU_PACKED cpuid_data;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int r;
    Error *local_err = NULL;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    if (hyperv_enabled(cpu)) {
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
        if (!cpu->hyperv_vendor_id) {
            memcpy(signature, "Microsoft Hv", 12);
        } else {
            size_t len = strlen(cpu->hyperv_vendor_id);

            if (len > 12) {
                error_report("hv-vendor-id truncated to 12 characters");
                len = 12;
            }
            memset(signature, 0, 12);
            memcpy(signature, cpu->hyperv_vendor_id, len);
        }
        c->eax = HYPERV_CPUID_MIN;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_INTERFACE;
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_FEATURES;
        r = hyperv_handle_properties(cs);
        if (r) {
            return r;
        }
        c->eax = env->features[FEAT_HYPERV_EAX];
        c->ebx = env->features[FEAT_HYPERV_EBX];
        c->edx = env->features[FEAT_HYPERV_EDX];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
        }
        if (cpu->hyperv_vapic) {
            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
        }
        c->ebx = cpu->hyperv_spinlock_attempts;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
        c->eax = 0x40;
        c->ebx = 0x40;

        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t ver;

        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
        if ((ver & 0xff) > 0) {
            has_msr_architectural_pmu = true;
            num_architectural_pmu_counters = (ver & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_counters = MAX_GP_COUNTERS;
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            if (unsupported_caps & MCG_LMCE_P) {
                error_report("kvm: LMCE not supported");
                return -ENOTSUP;
            }
            error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
                         unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    if (env->mcg_cap & MCG_LMCE_P) {
        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
    }

    if (!env->user_tsc_khz) {
        if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
            invtsc_mig_blocker == NULL) {
            /* for migration */
            error_setg(&invtsc_mig_blocker,
                       "State blocked by non-migratable CPU device"
                       " (invtsc flag)");
            r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
            if (local_err) {
                error_report_err(local_err);
                error_free(invtsc_mig_blocker);
                goto fail;
            }
            /* for savevm */
            vmstate_x86_cpu.unmigratable = 1;
        }
    }

    r = kvm_arch_set_tsc_khz(cs);
    if (r < 0) {
        goto fail;
    }

    /* vcpu's TSC frequency is either specified by user, or following
     * the value used by KVM if the former is not present. In the
     * latter case, we query it from KVM and record in env->tsc_khz,
     * so that vcpu's TSC frequency can be migrated later via this field.
     */
    if (!env->tsc_khz) {
        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
            -ENOTSUP;
        if (r > 0) {
            env->tsc_khz = r;
        }
    }

    if (cpu->vmware_cpuid_freq
        /* Guests depend on 0x40000000 to detect this feature, so only expose
         * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
        && cpu->expose_kvm
        && kvm_base == KVM_CPUID_SIGNATURE
        /* TSC clock must be stable and known for this feature. */
        && ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
            || env->user_tsc_khz != 0)
        && env->tsc_khz != 0) {

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | 0x10;
        c->eax = env->tsc_khz;
        /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
         * APIC_BUS_CYCLE_NS */
        c->ebx = 1000000;
        c->ecx = c->edx = 0;

        c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
        c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        goto fail;
    }

    if (has_xsave) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }
    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);

    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
        has_msr_tsc_aux = false;
    }

    return 0;

 fail:
    migrate_del_blocker(invtsc_mig_blocker);
    return r;
}

void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state. */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}

static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM. These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
                    has_msr_tsc_aux = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
                    has_msr_tsc_adjust = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                    has_msr_tsc_deadline = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
                    has_msr_smbase = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                    has_msr_misc_enable = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
                    has_msr_bndcfgs = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
                    has_msr_xss = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
                    has_msr_hv_crash = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
                    has_msr_hv_reset = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
                    has_msr_hv_vpindex = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
                    has_msr_hv_runtime = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
                    has_msr_hv_synic = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) {
                    has_msr_hv_stimer = true;
                    continue;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}
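
/* Protocol note (added for clarity): the first KVM_GET_MSR_INDEX_LIST call
 * is issued with nmsrs = 0 purely to learn the required count; the kernel
 * fails it with E2BIG after storing the real number of MSRs in
 * msr_list.nmsrs, and the second call then fetches the indices into a
 * buffer sized from that count.
 */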

static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;

static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container... */
    memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /* ... with two regions inside: normal system memory with low
     * priority, and...
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    if (smram) {
        /* ... SMRAM with higher priority */
        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
        memory_region_set_enabled(smram, true);
    }

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1);
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

#ifdef KVM_CAP_XSAVE
    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    shadow_mem = machine_kvm_shadow_mem(ms);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }

    if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
        object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE) &&
        pc_machine_is_smm_enabled(PC_MACHINE(ms))) {
        smram_machine_done.notify = register_smram_listener;
        qemu_add_machine_init_done_notifier(&smram_machine_done);
    }
    return 0;
}
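
/* Address layout sketch (illustrative, derived from the code above): with
 * KVM_CAP_SET_IDENTITY_MAP_ADDR the EPT identity map page lives at
 * 0xfeffc000, the TSS occupies the next three pages at 0xfeffd000 through
 * 0xfeffffff, and the whole 0x4000-byte range is reserved via e820,
 * leaving 0xff000000-0xffffffff free for a BIOS of up to 16M.
 */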

static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}
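
/* Encoding note (added for clarity): SegmentCache.flags mirrors bits 8-23
 * of the second word of an x86 segment descriptor. For example, a present
 * (bit 15) ring-3 (DPL in bits 13-14) data segment of type 3 packs as
 * (3 << DESC_TYPE_SHIFT) | DESC_P_MASK | (3 << DESC_DPL_SHIFT) |
 * DESC_S_MASK, matching the attributes set_v8086_seg() fills in above.
 */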

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}
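
/* Note (added for clarity): one accessor covers both directions; set != 0
 * copies QEMU state into the kvm_regs field, set == 0 copies it back, so
 * kvm_getput_regs() below needs to list each register only once.
 */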

static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
    }
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}

#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#define XSAVE_BNDREGS     240
#define XSAVE_BNDCSR      256
#define XSAVE_OPMASK      272
#define XSAVE_ZMM_Hi256   288
#define XSAVE_Hi16_ZMM    416
#define XSAVE_PKRU        672

#define XSAVE_BYTE_OFFSET(word_offset) \
    ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0]))

#define ASSERT_OFFSET(word_offset, field) \
    QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
                      offsetof(X86XSaveArea, field))

ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
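
/* Offset arithmetic sketch (illustrative): struct kvm_xsave's region is an
 * array of __u32 words, so XSAVE_BYTE_OFFSET() multiplies the word offset
 * by 4. For example XSAVE_XSTATE_BV (word 128) lands at byte 512, the
 * start of the architectural XSAVE header, and XSAVE_YMMH_SPACE (word 144)
 * lands at byte 576, the standard AVX state offset.
 */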

static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    int i;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->legacy.fcw = cwd;
    xsave->legacy.fsw = swd;
    xsave->legacy.ftw = twd;
    xsave->legacy.fpop = env->fpop;
    xsave->legacy.fpip = env->fpip;
    xsave->legacy.fpdp = env->fpdp;
    memcpy(&xsave->legacy.fpregs, env->fpregs,
            sizeof env->fpregs);
    xsave->legacy.mxcsr = env->mxcsr;
    xsave->header.xstate_bv = env->xstate_bv;
    memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
            sizeof env->bnd_regs);
    xsave->bndcsr_state.bndcsr = env->bndcs_regs;
    memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
            sizeof env->opmask_regs);

    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = xsave->legacy.xmm_regs[i];
        uint8_t *ymmh = xsave->avx_state.ymmh[i];
        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        stq_p(xmm,     env->xmm_regs[i].ZMM_Q(0));
        stq_p(xmm+8,   env->xmm_regs[i].ZMM_Q(1));
        stq_p(ymmh,    env->xmm_regs[i].ZMM_Q(2));
        stq_p(ymmh+8,  env->xmm_regs[i].ZMM_Q(3));
        stq_p(zmmh,    env->xmm_regs[i].ZMM_Q(4));
        stq_p(zmmh+8,  env->xmm_regs[i].ZMM_Q(5));
        stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
        stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
    }

#ifdef TARGET_X86_64
    memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
            16 * sizeof env->xmm_regs[16]);
    memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
#endif
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
}

static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs = {};

    if (!has_xcrs) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

static void kvm_msr_buf_reset(X86CPU *cpu)
{
    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
}

static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
{
    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];

    assert((void *)(entry + 1) <= limit);

    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
    msrs->nmsrs++;
}

static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
{
    kvm_msr_buf_reset(cpu);
    kvm_msr_entry_add(cpu, index, value);

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}
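
/* Usage sketch (illustrative, hypothetical caller): the helpers above are
 * meant to be used as a batch:
 *
 *     kvm_msr_buf_reset(cpu);
 *     kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
 *     kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
 *     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
 *
 * On success ret holds the number of MSRs actually processed, which is how
 * kvm_put_msrs() below detects a partially rejected batch.
 */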

void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
{
    int ret;

    ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
    assert(ret == 1);
}

static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int ret;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    int ret;

    if (!has_msr_feature_control) {
        return 0;
    }

    ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
                          cpu->env.msr_ia32_feature_control);
    if (ret < 0) {
        return ret;
    }

    assert(ret == 1);
    return 0;
}

static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    int i;
    int ret;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
        kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
    }
#endif

    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback. Limit them to reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
        kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
            kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
            kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
        }
        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
            kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
        }
        if (has_msr_architectural_pmu) {
            /* Stop the counter. */
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);

            /* Set the counter values. */
            for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_counters; i++) {
                kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
                              env->msr_global_status);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                              env->msr_global_ovf_ctrl);

            /* Now start the PMU. */
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
                              env->msr_fixed_ctr_ctrl);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
                              env->msr_global_ctrl);
        }
        if (has_msr_hv_hypercall) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
                              env->msr_hv_guest_os_id);
            kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
                              env->msr_hv_hypercall);
        }
        if (cpu->hyperv_vapic) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }
        if (cpu->hyperv_time) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
        }
        if (has_msr_hv_crash) {
            int j;

            for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);
            }

            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
                              HV_X64_MSR_CRASH_CTL_NOTIFY);
        }
        if (has_msr_hv_runtime) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
        }
        if (cpu->hyperv_synic) {
            int j;

            kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
                              env->msr_hv_synic_control);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
                              env->msr_hv_synic_version);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
                              env->msr_hv_synic_evt_page);
            kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
                              env->msr_hv_synic_msg_page);

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
                                  env->msr_hv_synic_sint[j]);
            }
        }
        if (has_msr_hv_stimer) {
            int j;

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
                                  env->msr_hv_stimer_config[j]);
            }

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
                                  env->msr_hv_stimer_count[j]);
            }
        }
        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
            uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);

            kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
            kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                /* The CPU GPs if we write to a bit above the physical limit of
                 * the host CPU (and KVM emulates that)
                 */
                uint64_t mask = env->mtrr_var[i].mask;
                mask &= phys_mask;

                kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
                                  env->mtrr_var[i].base);
                kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
            }
        }

        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control. */
    }
    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    if (ret < cpu->kvm_msr_buf->nmsrs) {
        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
                     (uint32_t)e->index, (uint64_t)e->data);
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    return 0;
}
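
/* Semantics note (added for clarity): KVM_SET_MSRS returns the count of
 * entries processed before the first failure, so comparing ret against
 * nmsrs pinpoints the exact MSR the kernel rejected; that entry is what
 * the error_report() above prints before the assert fires.
 */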

static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    X86XSaveArea *xsave = env->kvm_xsave_buf;
    int ret, i;
    uint16_t cwd, swd, twd;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }

    cwd = xsave->legacy.fcw;
    swd = xsave->legacy.fsw;
    twd = xsave->legacy.ftw;
    env->fpop = xsave->legacy.fpop;
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    env->fpip = xsave->legacy.fpip;
    env->fpdp = xsave->legacy.fpdp;
    env->mxcsr = xsave->legacy.mxcsr;
    memcpy(env->fpregs, &xsave->legacy.fpregs,
            sizeof env->fpregs);
    env->xstate_bv = xsave->header.xstate_bv;
    memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
            sizeof env->bnd_regs);
    env->bndcs_regs = xsave->bndcsr_state.bndcsr;
    memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
            sizeof env->opmask_regs);

    for (i = 0; i < CPU_NB_REGS; i++) {
        uint8_t *xmm = xsave->legacy.xmm_regs[i];
        uint8_t *ymmh = xsave->avx_state.ymmh[i];
        uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
        env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
        env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
        env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
        env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
        env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
        env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
        env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
    }

#ifdef TARGET_X86_64
    memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
           16 * sizeof env->xmm_regs[16]);
    memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
#endif
    return 0;
}

static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}

static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;

    return 0;
}

static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i;
    uint64_t mtrr_top_bits;

    kvm_msr_buf_reset(cpu);

    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
    kvm_msr_entry_add(cpu, MSR_PAT, 0);
    if (has_msr_star) {
        kvm_msr_entry_add(cpu, MSR_STAR, 0);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
    }
    if (has_msr_tsc_deadline) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
    }
    if (has_msr_feature_control) {
        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
    }
    if (has_msr_xss) {
        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
    }

    if (!env->tsc_valid) {
        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
    }
#endif
2098 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
2099 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
2100 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
2101 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
2103 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
2104 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
2106 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
2107 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
2109 if (has_msr_architectural_pmu) {
2110 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
2111 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
2112 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
2113 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
2114 for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
2115 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
2117 for (i = 0; i < num_architectural_pmu_counters; i++) {
2118 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
2119 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
2123 if (env->mcg_cap) {
2124 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
2125 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
2126 if (has_msr_mcg_ext_ctl) {
2127 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
2129 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
2130 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
2134 if (has_msr_hv_hypercall) {
2135 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
2136 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
2138 if (cpu->hyperv_vapic) {
2139 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
2141 if (cpu->hyperv_time) {
2142 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
2144 if (has_msr_hv_crash) {
2145 int j;
2147 for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
2148 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
2151 if (has_msr_hv_runtime) {
2152 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
2154 if (cpu->hyperv_synic) {
2155 uint32_t msr;
2157 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
2158 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
2159 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
2160 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
2161 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
2162 kvm_msr_entry_add(cpu, msr, 0);
2165 if (has_msr_hv_stimer) {
2166 uint32_t msr;
2168 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
2169 msr++) {
2170 kvm_msr_entry_add(cpu, msr, 0);
2173 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
2174 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
2175 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
2176 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
2177 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
2178 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
2179 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
2180 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
2181 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
2182 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
2183 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
2184 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
2185 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
2186 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
2187 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
2188 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
2192 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
2193 if (ret < 0) {
2194 return ret;
2197 if (ret < cpu->kvm_msr_buf->nmsrs) {
2198 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
2199 error_report("error: failed to get MSR 0x%" PRIx32,
2200 (uint32_t)e->index);
2203 assert(ret == cpu->kvm_msr_buf->nmsrs);
2205 * MTRR masks: Each mask consists of 5 parts
2206 * a 10..0 : must be zero
2207 * b 11    : valid bit
2208 * c n-1..12: actual mask bits
2209 * d 51..n : reserved, must be zero
2210 * e 63..52: reserved, must be zero
2212 * 'n' is the number of physical address bits supported by the CPU and
2213 * is apparently always <= 52. We know our 'n' but don't know what the
2214 * destination's 'n' is; it might be smaller, in which case the
2215 * destination masks off (c) on loading. It might be larger, in which
2216 * case we fill 'd' so that d..c is consistent irrespective of the 'n'
2217 * we're migrating to.
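/*
 * Concrete instance of the above (assumed cpu->phys_bits value, for
 * illustration): with cpu->phys_bits == 36,
 *     mtrr_top_bits = MAKE_64BIT_MASK(36, 52 - 36) = 0x000ffff000000000
 * so bits 51..36 of every variable-range mask read below are forced to
 * 1, keeping part (d) well defined for a destination whose 'n' is
 * larger than ours.
 */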
2220 if (cpu->fill_mtrr_mask) {
2221 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
2222 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
2223 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
2224 } else {
2225 mtrr_top_bits = 0;
2228 for (i = 0; i < ret; i++) {
2229 uint32_t index = msrs[i].index;
2230 switch (index) {
2231 case MSR_IA32_SYSENTER_CS:
2232 env->sysenter_cs = msrs[i].data;
2233 break;
2234 case MSR_IA32_SYSENTER_ESP:
2235 env->sysenter_esp = msrs[i].data;
2236 break;
2237 case MSR_IA32_SYSENTER_EIP:
2238 env->sysenter_eip = msrs[i].data;
2239 break;
2240 case MSR_PAT:
2241 env->pat = msrs[i].data;
2242 break;
2243 case MSR_STAR:
2244 env->star = msrs[i].data;
2245 break;
2246 #ifdef TARGET_X86_64
2247 case MSR_CSTAR:
2248 env->cstar = msrs[i].data;
2249 break;
2250 case MSR_KERNELGSBASE:
2251 env->kernelgsbase = msrs[i].data;
2252 break;
2253 case MSR_FMASK:
2254 env->fmask = msrs[i].data;
2255 break;
2256 case MSR_LSTAR:
2257 env->lstar = msrs[i].data;
2258 break;
2259 #endif
2260 case MSR_IA32_TSC:
2261 env->tsc = msrs[i].data;
2262 break;
2263 case MSR_TSC_AUX:
2264 env->tsc_aux = msrs[i].data;
2265 break;
2266 case MSR_TSC_ADJUST:
2267 env->tsc_adjust = msrs[i].data;
2268 break;
2269 case MSR_IA32_TSCDEADLINE:
2270 env->tsc_deadline = msrs[i].data;
2271 break;
2272 case MSR_VM_HSAVE_PA:
2273 env->vm_hsave = msrs[i].data;
2274 break;
2275 case MSR_KVM_SYSTEM_TIME:
2276 env->system_time_msr = msrs[i].data;
2277 break;
2278 case MSR_KVM_WALL_CLOCK:
2279 env->wall_clock_msr = msrs[i].data;
2280 break;
2281 case MSR_MCG_STATUS:
2282 env->mcg_status = msrs[i].data;
2283 break;
2284 case MSR_MCG_CTL:
2285 env->mcg_ctl = msrs[i].data;
2286 break;
2287 case MSR_MCG_EXT_CTL:
2288 env->mcg_ext_ctl = msrs[i].data;
2289 break;
2290 case MSR_IA32_MISC_ENABLE:
2291 env->msr_ia32_misc_enable = msrs[i].data;
2292 break;
2293 case MSR_IA32_SMBASE:
2294 env->smbase = msrs[i].data;
2295 break;
2296 case MSR_IA32_FEATURE_CONTROL:
2297 env->msr_ia32_feature_control = msrs[i].data;
2298 break;
2299 case MSR_IA32_BNDCFGS:
2300 env->msr_bndcfgs = msrs[i].data;
2301 break;
2302 case MSR_IA32_XSS:
2303 env->xss = msrs[i].data;
2304 break;
2305 default:
2306 if (msrs[i].index >= MSR_MC0_CTL &&
2307 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
2308 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
2310 break;
2311 case MSR_KVM_ASYNC_PF_EN:
2312 env->async_pf_en_msr = msrs[i].data;
2313 break;
2314 case MSR_KVM_PV_EOI_EN:
2315 env->pv_eoi_en_msr = msrs[i].data;
2316 break;
2317 case MSR_KVM_STEAL_TIME:
2318 env->steal_time_msr = msrs[i].data;
2319 break;
2320 case MSR_CORE_PERF_FIXED_CTR_CTRL:
2321 env->msr_fixed_ctr_ctrl = msrs[i].data;
2322 break;
2323 case MSR_CORE_PERF_GLOBAL_CTRL:
2324 env->msr_global_ctrl = msrs[i].data;
2325 break;
2326 case MSR_CORE_PERF_GLOBAL_STATUS:
2327 env->msr_global_status = msrs[i].data;
2328 break;
2329 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
2330 env->msr_global_ovf_ctrl = msrs[i].data;
2331 break;
2332 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
2333 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
2334 break;
2335 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
2336 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
2337 break;
2338 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
2339 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
2340 break;
2341 case HV_X64_MSR_HYPERCALL:
2342 env->msr_hv_hypercall = msrs[i].data;
2343 break;
2344 case HV_X64_MSR_GUEST_OS_ID:
2345 env->msr_hv_guest_os_id = msrs[i].data;
2346 break;
2347 case HV_X64_MSR_APIC_ASSIST_PAGE:
2348 env->msr_hv_vapic = msrs[i].data;
2349 break;
2350 case HV_X64_MSR_REFERENCE_TSC:
2351 env->msr_hv_tsc = msrs[i].data;
2352 break;
2353 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2354 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
2355 break;
2356 case HV_X64_MSR_VP_RUNTIME:
2357 env->msr_hv_runtime = msrs[i].data;
2358 break;
2359 case HV_X64_MSR_SCONTROL:
2360 env->msr_hv_synic_control = msrs[i].data;
2361 break;
2362 case HV_X64_MSR_SVERSION:
2363 env->msr_hv_synic_version = msrs[i].data;
2364 break;
2365 case HV_X64_MSR_SIEFP:
2366 env->msr_hv_synic_evt_page = msrs[i].data;
2367 break;
2368 case HV_X64_MSR_SIMP:
2369 env->msr_hv_synic_msg_page = msrs[i].data;
2370 break;
2371 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
2372 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
2373 break;
2374 case HV_X64_MSR_STIMER0_CONFIG:
2375 case HV_X64_MSR_STIMER1_CONFIG:
2376 case HV_X64_MSR_STIMER2_CONFIG:
2377 case HV_X64_MSR_STIMER3_CONFIG:
2378 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
2379 msrs[i].data;
2380 break;
2381 case HV_X64_MSR_STIMER0_COUNT:
2382 case HV_X64_MSR_STIMER1_COUNT:
2383 case HV_X64_MSR_STIMER2_COUNT:
2384 case HV_X64_MSR_STIMER3_COUNT:
2385 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
2386 msrs[i].data;
2387 break;
2388 case MSR_MTRRdefType:
2389 env->mtrr_deftype = msrs[i].data;
2390 break;
2391 case MSR_MTRRfix64K_00000:
2392 env->mtrr_fixed[0] = msrs[i].data;
2393 break;
2394 case MSR_MTRRfix16K_80000:
2395 env->mtrr_fixed[1] = msrs[i].data;
2396 break;
2397 case MSR_MTRRfix16K_A0000:
2398 env->mtrr_fixed[2] = msrs[i].data;
2399 break;
2400 case MSR_MTRRfix4K_C0000:
2401 env->mtrr_fixed[3] = msrs[i].data;
2402 break;
2403 case MSR_MTRRfix4K_C8000:
2404 env->mtrr_fixed[4] = msrs[i].data;
2405 break;
2406 case MSR_MTRRfix4K_D0000:
2407 env->mtrr_fixed[5] = msrs[i].data;
2408 break;
2409 case MSR_MTRRfix4K_D8000:
2410 env->mtrr_fixed[6] = msrs[i].data;
2411 break;
2412 case MSR_MTRRfix4K_E0000:
2413 env->mtrr_fixed[7] = msrs[i].data;
2414 break;
2415 case MSR_MTRRfix4K_E8000:
2416 env->mtrr_fixed[8] = msrs[i].data;
2417 break;
2418 case MSR_MTRRfix4K_F0000:
2419 env->mtrr_fixed[9] = msrs[i].data;
2420 break;
2421 case MSR_MTRRfix4K_F8000:
2422 env->mtrr_fixed[10] = msrs[i].data;
2423 break;
2424 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
2425 if (index & 1) {
2426 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
2427 mtrr_top_bits;
2428 } else {
2429 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
2431 break;
2435 return 0;
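/*
 * The code above follows the same buffer protocol as the MSR write
 * path. A minimal sketch of the round trip for a single MSR (purely
 * illustrative, not a helper that exists in this file):
 *
 *     kvm_msr_buf_reset(cpu);
 *     kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);   (value ignored on read)
 *     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
 *     if (ret == 1) {   (KVM_GET_MSRS returns the number of MSRs read)
 *         env->tsc = cpu->kvm_msr_buf->entries[0].data;
 *     }
 */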
2438 static int kvm_put_mp_state(X86CPU *cpu)
2440 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
2442 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
2445 static int kvm_get_mp_state(X86CPU *cpu)
2447 CPUState *cs = CPU(cpu);
2448 CPUX86State *env = &cpu->env;
2449 struct kvm_mp_state mp_state;
2450 int ret;
2452 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
2453 if (ret < 0) {
2454 return ret;
2456 env->mp_state = mp_state.mp_state;
2457 if (kvm_irqchip_in_kernel()) {
2458 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
2460 return 0;
2463 static int kvm_get_apic(X86CPU *cpu)
2465 DeviceState *apic = cpu->apic_state;
2466 struct kvm_lapic_state kapic;
2467 int ret;
2469 if (apic && kvm_irqchip_in_kernel()) {
2470 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
2471 if (ret < 0) {
2472 return ret;
2475 kvm_get_apic_state(apic, &kapic);
2477 return 0;
2480 static int kvm_put_vcpu_events(X86CPU *cpu, int level)
2482 CPUState *cs = CPU(cpu);
2483 CPUX86State *env = &cpu->env;
2484 struct kvm_vcpu_events events = {};
2486 if (!kvm_has_vcpu_events()) {
2487 return 0;
2490 events.exception.injected = (env->exception_injected >= 0);
2491 events.exception.nr = env->exception_injected;
2492 events.exception.has_error_code = env->has_error_code;
2493 events.exception.error_code = env->error_code;
2494 events.exception.pad = 0;
2496 events.interrupt.injected = (env->interrupt_injected >= 0);
2497 events.interrupt.nr = env->interrupt_injected;
2498 events.interrupt.soft = env->soft_interrupt;
2500 events.nmi.injected = env->nmi_injected;
2501 events.nmi.pending = env->nmi_pending;
2502 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
2503 events.nmi.pad = 0;
2505 events.sipi_vector = env->sipi_vector;
2506 events.flags = 0;
2508 if (has_msr_smbase) {
2509 events.smi.smm = !!(env->hflags & HF_SMM_MASK);
2510 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
2511 if (kvm_irqchip_in_kernel()) {
2512 /* As soon as these are moved to the kernel, remove them
2513 * from cs->interrupt_request.
2515 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
2516 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
2517 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
2518 } else {
2519 /* Keep these in cs->interrupt_request. */
2520 events.smi.pending = 0;
2521 events.smi.latched_init = 0;
2523 /* Stop SMI delivery on old machine types to avoid a reboot
2524 * on an incoming migration of an old VM.
2526 if (!cpu->kvm_no_smi_migration) {
2527 events.flags |= KVM_VCPUEVENT_VALID_SMM;
2531 if (level >= KVM_PUT_RESET_STATE) {
2532 events.flags |=
2533 KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
2536 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
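/*
 * Note on events.flags, summarizing the KVM API from memory: the
 * kernel only consumes nmi.pending, sipi_vector and the smi fields
 * when the matching KVM_VCPUEVENT_VALID_* flag is set, which is why
 * they are flagged above only for level >= KVM_PUT_RESET_STATE or
 * when SMI migration is enabled.
 */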
2539 static int kvm_get_vcpu_events(X86CPU *cpu)
2541 CPUX86State *env = &cpu->env;
2542 struct kvm_vcpu_events events;
2543 int ret;
2545 if (!kvm_has_vcpu_events()) {
2546 return 0;
2549 memset(&events, 0, sizeof(events));
2550 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
2551 if (ret < 0) {
2552 return ret;
2554 env->exception_injected =
2555 events.exception.injected ? events.exception.nr : -1;
2556 env->has_error_code = events.exception.has_error_code;
2557 env->error_code = events.exception.error_code;
2559 env->interrupt_injected =
2560 events.interrupt.injected ? events.interrupt.nr : -1;
2561 env->soft_interrupt = events.interrupt.soft;
2563 env->nmi_injected = events.nmi.injected;
2564 env->nmi_pending = events.nmi.pending;
2565 if (events.nmi.masked) {
2566 env->hflags2 |= HF2_NMI_MASK;
2567 } else {
2568 env->hflags2 &= ~HF2_NMI_MASK;
2571 if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
2572 if (events.smi.smm) {
2573 env->hflags |= HF_SMM_MASK;
2574 } else {
2575 env->hflags &= ~HF_SMM_MASK;
2577 if (events.smi.pending) {
2578 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
2579 } else {
2580 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
2582 if (events.smi.smm_inside_nmi) {
2583 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
2584 } else {
2585 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
2587 if (events.smi.latched_init) {
2588 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
2589 } else {
2590 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
2594 env->sipi_vector = events.sipi_vector;
2596 return 0;
2599 static int kvm_guest_debug_workarounds(X86CPU *cpu)
2601 CPUState *cs = CPU(cpu);
2602 CPUX86State *env = &cpu->env;
2603 int ret = 0;
2604 unsigned long reinject_trap = 0;
2606 if (!kvm_has_vcpu_events()) {
2607 if (env->exception_injected == 1) { /* #DB, debug exception */
2608 reinject_trap = KVM_GUESTDBG_INJECT_DB;
2609 } else if (env->exception_injected == 3) { /* #BP, breakpoint */
2610 reinject_trap = KVM_GUESTDBG_INJECT_BP;
2612 env->exception_injected = -1;
2616 * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
2617 * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
2618 * by updating the debug state once again if single-stepping is on.
2619 * Another reason to call kvm_update_guest_debug here is a pending debug
2620 * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
2621 * reinject them via SET_GUEST_DEBUG.
2623 if (reinject_trap ||
2624 (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
2625 ret = kvm_update_guest_debug(cs, reinject_trap);
2627 return ret;
2630 static int kvm_put_debugregs(X86CPU *cpu)
2632 CPUX86State *env = &cpu->env;
2633 struct kvm_debugregs dbgregs;
2634 int i;
2636 if (!kvm_has_debugregs()) {
2637 return 0;
2640 for (i = 0; i < 4; i++) {
2641 dbgregs.db[i] = env->dr[i];
2643 dbgregs.dr6 = env->dr[6];
2644 dbgregs.dr7 = env->dr[7];
2645 dbgregs.flags = 0;
2647 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
2650 static int kvm_get_debugregs(X86CPU *cpu)
2652 CPUX86State *env = &cpu->env;
2653 struct kvm_debugregs dbgregs;
2654 int i, ret;
2656 if (!kvm_has_debugregs()) {
2657 return 0;
2660 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
2661 if (ret < 0) {
2662 return ret;
2664 for (i = 0; i < 4; i++) {
2665 env->dr[i] = dbgregs.db[i];
2667 env->dr[4] = env->dr[6] = dbgregs.dr6; /* DR4 aliases DR6 when CR4.DE = 0 */
2668 env->dr[5] = env->dr[7] = dbgregs.dr7; /* DR5 aliases DR7 when CR4.DE = 0 */
2670 return 0;
2673 int kvm_arch_put_registers(CPUState *cpu, int level)
2675 X86CPU *x86_cpu = X86_CPU(cpu);
2676 int ret;
2678 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
2680 if (level >= KVM_PUT_RESET_STATE) {
2681 ret = kvm_put_msr_feature_control(x86_cpu);
2682 if (ret < 0) {
2683 return ret;
2687 if (level == KVM_PUT_FULL_STATE) {
2688 /* We don't check for kvm_arch_set_tsc_khz() errors here,
2689 * because a TSC frequency mismatch shouldn't abort migration,
2690 * unless the user explicitly asked for a stricter TSC
2691 * setting (e.g. using an explicit "tsc-freq" option).
2693 kvm_arch_set_tsc_khz(cpu);
2696 ret = kvm_getput_regs(x86_cpu, 1);
2697 if (ret < 0) {
2698 return ret;
2700 ret = kvm_put_xsave(x86_cpu);
2701 if (ret < 0) {
2702 return ret;
2704 ret = kvm_put_xcrs(x86_cpu);
2705 if (ret < 0) {
2706 return ret;
2708 ret = kvm_put_sregs(x86_cpu);
2709 if (ret < 0) {
2710 return ret;
2712 /* must be before kvm_put_msrs */
2713 ret = kvm_inject_mce_oldstyle(x86_cpu);
2714 if (ret < 0) {
2715 return ret;
2717 ret = kvm_put_msrs(x86_cpu, level);
2718 if (ret < 0) {
2719 return ret;
2721 if (level >= KVM_PUT_RESET_STATE) {
2722 ret = kvm_put_mp_state(x86_cpu);
2723 if (ret < 0) {
2724 return ret;
2728 ret = kvm_put_tscdeadline_msr(x86_cpu);
2729 if (ret < 0) {
2730 return ret;
2733 ret = kvm_put_vcpu_events(x86_cpu, level);
2734 if (ret < 0) {
2735 return ret;
2737 ret = kvm_put_debugregs(x86_cpu);
2738 if (ret < 0) {
2739 return ret;
2741 /* must be last */
2742 ret = kvm_guest_debug_workarounds(x86_cpu);
2743 if (ret < 0) {
2744 return ret;
2746 return 0;
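/*
 * The 'level' values used above form a hierarchy (a QEMU convention
 * from sysemu/kvm.h): KVM_PUT_RUNTIME_STATE < KVM_PUT_RESET_STATE <
 * KVM_PUT_FULL_STATE. Runtime state is synced frequently while the
 * guest runs, reset state is written after a CPU reset, and full
 * state (including e.g. the TSC rate) is written only when the CPU is
 * first initialized or its state is loaded, as on incoming migration.
 */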
2749 int kvm_arch_get_registers(CPUState *cs)
2751 X86CPU *cpu = X86_CPU(cs);
2752 int ret;
2754 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
2756 ret = kvm_getput_regs(cpu, 0);
2757 if (ret < 0) {
2758 goto out;
2760 ret = kvm_get_xsave(cpu);
2761 if (ret < 0) {
2762 goto out;
2764 ret = kvm_get_xcrs(cpu);
2765 if (ret < 0) {
2766 goto out;
2768 ret = kvm_get_sregs(cpu);
2769 if (ret < 0) {
2770 goto out;
2772 ret = kvm_get_msrs(cpu);
2773 if (ret < 0) {
2774 goto out;
2776 ret = kvm_get_mp_state(cpu);
2777 if (ret < 0) {
2778 goto out;
2780 ret = kvm_get_apic(cpu);
2781 if (ret < 0) {
2782 goto out;
2784 ret = kvm_get_vcpu_events(cpu);
2785 if (ret < 0) {
2786 goto out;
2788 ret = kvm_get_debugregs(cpu);
2789 if (ret < 0) {
2790 goto out;
2792 ret = 0;
2793 out:
2794 cpu_sync_bndcs_hflags(&cpu->env);
2795 return ret;
2798 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
2800 X86CPU *x86_cpu = X86_CPU(cpu);
2801 CPUX86State *env = &x86_cpu->env;
2802 int ret;
2804 /* Inject NMI */
2805 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
2806 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
2807 qemu_mutex_lock_iothread();
2808 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
2809 qemu_mutex_unlock_iothread();
2810 DPRINTF("injected NMI\n");
2811 ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
2812 if (ret < 0) {
2813 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
2814 strerror(-ret));
2817 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
2818 qemu_mutex_lock_iothread();
2819 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
2820 qemu_mutex_unlock_iothread();
2821 DPRINTF("injected SMI\n");
2822 ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
2823 if (ret < 0) {
2824 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
2825 strerror(-ret));
2830 if (!kvm_pic_in_kernel()) {
2831 qemu_mutex_lock_iothread();
2834 /* Force the VCPU out of its inner loop to process any INIT requests
2835 * or (for userspace APIC, but it is cheap to combine the checks here)
2836 * pending TPR access reports.
2838 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
2839 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
2840 !(env->hflags & HF_SMM_MASK)) {
2841 cpu->exit_request = 1;
2843 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
2844 cpu->exit_request = 1;
2848 if (!kvm_pic_in_kernel()) {
2849 /* Try to inject an interrupt if the guest can accept it */
2850 if (run->ready_for_interrupt_injection &&
2851 (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
2852 (env->eflags & IF_MASK)) {
2853 int irq;
2855 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
2856 irq = cpu_get_pic_interrupt(env);
2857 if (irq >= 0) {
2858 struct kvm_interrupt intr;
2860 intr.irq = irq;
2861 DPRINTF("injected interrupt %d\n", irq);
2862 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
2863 if (ret < 0) {
2864 fprintf(stderr,
2865 "KVM: injection failed, interrupt lost (%s)\n",
2866 strerror(-ret));
2871 /* If we have an interrupt but the guest is not ready to receive an
2872 * interrupt, request an interrupt window exit. This will
2873 * cause a return to userspace as soon as the guest is ready to
2874 * receive interrupts. */
2875 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
2876 run->request_interrupt_window = 1;
2877 } else {
2878 run->request_interrupt_window = 0;
2881 DPRINTF("setting tpr\n");
2882 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
2884 qemu_mutex_unlock_iothread();
2888 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
2890 X86CPU *x86_cpu = X86_CPU(cpu);
2891 CPUX86State *env = &x86_cpu->env;
2893 if (run->flags & KVM_RUN_X86_SMM) {
2894 env->hflags |= HF_SMM_MASK;
2895 } else {
2896 env->hflags &= ~HF_SMM_MASK;
2898 if (run->if_flag) {
2899 env->eflags |= IF_MASK;
2900 } else {
2901 env->eflags &= ~IF_MASK;
2904 /* We need to protect the apic state against concurrent accesses from
2905 * different threads in case the userspace irqchip is used. */
2906 if (!kvm_irqchip_in_kernel()) {
2907 qemu_mutex_lock_iothread();
2909 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
2910 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
2911 if (!kvm_irqchip_in_kernel()) {
2912 qemu_mutex_unlock_iothread();
2914 return cpu_get_mem_attrs(env);
2917 int kvm_arch_process_async_events(CPUState *cs)
2919 X86CPU *cpu = X86_CPU(cs);
2920 CPUX86State *env = &cpu->env;
2922 if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
2923 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
2924 assert(env->mcg_cap);
2926 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
2928 kvm_cpu_synchronize_state(cs);
2930 if (env->exception_injected == EXCP08_DBLE) {
2931 /* this means triple fault */
2932 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2933 cs->exit_request = 1;
2934 return 0;
2936 env->exception_injected = EXCP12_MCHK;
2937 env->has_error_code = 0;
2939 cs->halted = 0;
2940 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
2941 env->mp_state = KVM_MP_STATE_RUNNABLE;
2945 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
2946 !(env->hflags & HF_SMM_MASK)) {
2947 kvm_cpu_synchronize_state(cs);
2948 do_cpu_init(cpu);
2951 if (kvm_irqchip_in_kernel()) {
2952 return 0;
2955 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
2956 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
2957 apic_poll_irq(cpu->apic_state);
2959 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
2960 (env->eflags & IF_MASK)) ||
2961 (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
2962 cs->halted = 0;
2964 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
2965 kvm_cpu_synchronize_state(cs);
2966 do_cpu_sipi(cpu);
2968 if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
2969 cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
2970 kvm_cpu_synchronize_state(cs);
2971 apic_handle_tpr_access_report(cpu->apic_state, env->eip,
2972 env->tpr_access_type);
2975 return cs->halted;
2978 static int kvm_handle_halt(X86CPU *cpu)
2980 CPUState *cs = CPU(cpu);
2981 CPUX86State *env = &cpu->env;
2983 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
2984 (env->eflags & IF_MASK)) &&
2985 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
2986 cs->halted = 1;
2987 return EXCP_HLT;
2990 return 0;
2993 static int kvm_handle_tpr_access(X86CPU *cpu)
2995 CPUState *cs = CPU(cpu);
2996 struct kvm_run *run = cs->kvm_run;
2998 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
2999 run->tpr_access.is_write ? TPR_ACCESS_WRITE
3000 : TPR_ACCESS_READ);
3001 return 1;
3004 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
3006 static const uint8_t int3 = 0xcc; /* one-byte INT3 opcode */
3008 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
3009 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
3010 return -EINVAL;
3012 return 0;
3015 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
3017 uint8_t int3;
3019 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
3020 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
3021 return -EINVAL;
3023 return 0;
3026 static struct {
3027 target_ulong addr;
3028 int len;
3029 int type;
3030 } hw_breakpoint[4];
3032 static int nb_hw_breakpoint;
3034 static int find_hw_breakpoint(target_ulong addr, int len, int type)
3036 int n;
3038 for (n = 0; n < nb_hw_breakpoint; n++) {
3039 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
3040 (hw_breakpoint[n].len == len || len == -1)) {
3041 return n;
3044 return -1;
3047 int kvm_arch_insert_hw_breakpoint(target_ulong addr,
3048 target_ulong len, int type)
3050 switch (type) {
3051 case GDB_BREAKPOINT_HW:
3052 len = 1;
3053 break;
3054 case GDB_WATCHPOINT_WRITE:
3055 case GDB_WATCHPOINT_ACCESS:
3056 switch (len) {
3057 case 1:
3058 break;
3059 case 2:
3060 case 4:
3061 case 8:
3062 if (addr & (len - 1)) {
3063 return -EINVAL;
3065 break;
3066 default:
3067 return -EINVAL;
3069 break;
3070 default:
3071 return -ENOSYS;
3074 if (nb_hw_breakpoint == 4) {
3075 return -ENOBUFS;
3077 if (find_hw_breakpoint(addr, len, type) >= 0) {
3078 return -EEXIST;
3080 hw_breakpoint[nb_hw_breakpoint].addr = addr;
3081 hw_breakpoint[nb_hw_breakpoint].len = len;
3082 hw_breakpoint[nb_hw_breakpoint].type = type;
3083 nb_hw_breakpoint++;
3085 return 0;
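/*
 * Example of the alignment check above (illustrative addresses): a
 * 4-byte watchpoint at 0x1002 is rejected because 0x1002 & (4 - 1)
 * is nonzero, while 0x1004 is accepted; x86 debug registers only
 * support naturally aligned watchpoints.
 */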
3088 int kvm_arch_remove_hw_breakpoint(target_ulong addr,
3089 target_ulong len, int type)
3091 int n;
3093 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
3094 if (n < 0) {
3095 return -ENOENT;
3097 nb_hw_breakpoint--;
3098 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
3100 return 0;
3103 void kvm_arch_remove_all_hw_breakpoints(void)
3105 nb_hw_breakpoint = 0;
3108 static CPUWatchpoint hw_watchpoint;
3110 static int kvm_handle_debug(X86CPU *cpu,
3111 struct kvm_debug_exit_arch *arch_info)
3113 CPUState *cs = CPU(cpu);
3114 CPUX86State *env = &cpu->env;
3115 int ret = 0;
3116 int n;
3118 if (arch_info->exception == 1) {
3119 if (arch_info->dr6 & (1 << 14)) { /* DR6.BS: single-step trap */
3120 if (cs->singlestep_enabled) {
3121 ret = EXCP_DEBUG;
3123 } else {
3124 for (n = 0; n < 4; n++) {
3125 if (arch_info->dr6 & (1 << n)) {
3126 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
3127 case 0x0:
3128 ret = EXCP_DEBUG;
3129 break;
3130 case 0x1:
3131 ret = EXCP_DEBUG;
3132 cs->watchpoint_hit = &hw_watchpoint;
3133 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
3134 hw_watchpoint.flags = BP_MEM_WRITE;
3135 break;
3136 case 0x3:
3137 ret = EXCP_DEBUG;
3138 cs->watchpoint_hit = &hw_watchpoint;
3139 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
3140 hw_watchpoint.flags = BP_MEM_ACCESS;
3141 break;
3146 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
3147 ret = EXCP_DEBUG;
3149 if (ret == 0) {
3150 cpu_synchronize_state(cs);
3151 assert(env->exception_injected == -1);
3153 /* pass to guest */
3154 env->exception_injected = arch_info->exception;
3155 env->has_error_code = 0;
3158 return ret;
3161 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
3163 const uint8_t type_code[] = {
3164 [GDB_BREAKPOINT_HW] = 0x0,
3165 [GDB_WATCHPOINT_WRITE] = 0x1,
3166 [GDB_WATCHPOINT_ACCESS] = 0x3
3168 const uint8_t len_code[] = {
3169 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
3171 int n;
3173 if (kvm_sw_breakpoints_active(cpu)) {
3174 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
3176 if (nb_hw_breakpoint > 0) {
3177 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
3178 dbg->arch.debugreg[7] = 0x0600;
3179 for (n = 0; n < nb_hw_breakpoint; n++) {
3180 dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
3181 dbg->arch.debugreg[7] |= (2 << (n * 2)) |
3182 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
3183 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
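/*
 * Worked DR7 encoding (a single assumed watchpoint, for illustration):
 * one 4-byte write watchpoint in slot 0 yields
 *     dbg->arch.debugreg[7] = 0x0600        (GE plus always-set bit 10)
 *                           | (2 << 0)      (G0: global enable, slot 0)
 *                           | (0x1 << 16)   (R/W0 = 0x1: break on write)
 *                           | (0x3 << 18)   (LEN0 = 0x3: 4-byte range)
 *                           = 0x000d0602
 */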
3188 static bool host_supports_vmx(void)
3190 uint32_t ecx, unused;
3192 host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
3193 return ecx & CPUID_EXT_VMX;
3196 #define VMX_INVALID_GUEST_STATE 0x80000021
3198 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
3200 X86CPU *cpu = X86_CPU(cs);
3201 uint64_t code;
3202 int ret;
3204 switch (run->exit_reason) {
3205 case KVM_EXIT_HLT:
3206 DPRINTF("handle_hlt\n");
3207 qemu_mutex_lock_iothread();
3208 ret = kvm_handle_halt(cpu);
3209 qemu_mutex_unlock_iothread();
3210 break;
3211 case KVM_EXIT_SET_TPR:
3212 ret = 0;
3213 break;
3214 case KVM_EXIT_TPR_ACCESS:
3215 qemu_mutex_lock_iothread();
3216 ret = kvm_handle_tpr_access(cpu);
3217 qemu_mutex_unlock_iothread();
3218 break;
3219 case KVM_EXIT_FAIL_ENTRY:
3220 code = run->fail_entry.hardware_entry_failure_reason;
3221 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
3222 code);
3223 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
3224 fprintf(stderr,
3225 "\nIf you're running a guest on an Intel machine without "
3226 "unrestricted mode\n"
3227 "support, the failure can be most likely due to the guest "
3228 "entering an invalid\n"
3229 "state for Intel VT. For example, the guest maybe running "
3230 "in big real mode\n"
3231 "which is not supported on less recent Intel processors."
3232 "\n\n");
3234 ret = -1;
3235 break;
3236 case KVM_EXIT_EXCEPTION:
3237 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
3238 run->ex.exception, run->ex.error_code);
3239 ret = -1;
3240 break;
3241 case KVM_EXIT_DEBUG:
3242 DPRINTF("kvm_exit_debug\n");
3243 qemu_mutex_lock_iothread();
3244 ret = kvm_handle_debug(cpu, &run->debug.arch);
3245 qemu_mutex_unlock_iothread();
3246 break;
3247 case KVM_EXIT_HYPERV:
3248 ret = kvm_hv_handle_exit(cpu, &run->hyperv);
3249 break;
3250 case KVM_EXIT_IOAPIC_EOI:
3251 ioapic_eoi_broadcast(run->eoi.vector);
3252 ret = 0;
3253 break;
3254 default:
3255 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
3256 ret = -1;
3257 break;
3260 return ret;
3263 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
3265 X86CPU *cpu = X86_CPU(cs);
3266 CPUX86State *env = &cpu->env;
3268 kvm_cpu_synchronize_state(cs);
3269 return !(env->cr[0] & CR0_PE_MASK) ||
3270 ((env->segs[R_CS].selector & 3) != 3);
3273 void kvm_arch_init_irq_routing(KVMState *s)
3275 if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
3276 /* If the kernel can't do IRQ routing, the interrupt source
3277 * override 0->2 required by HPET cannot be set up.
3278 * So we have to disable HPET.
3280 no_hpet = 1;
3282 /* We know at this point that we're using the in-kernel
3283 * irqchip, so we can use irqfds, and on x86 we know
3284 * we can use msi via irqfd and GSI routing.
3286 kvm_msi_via_irqfd_allowed = true;
3287 kvm_gsi_routing_allowed = true;
3289 if (kvm_irqchip_is_split()) {
3290 int i;
3292 /* If the ioapic is in QEMU and the lapics are in KVM, reserve
3293 MSI routes for signaling interrupts to the local apics. */
3294 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
3295 if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
3296 error_report("Could not enable split IRQ mode.");
3297 exit(1);
3303 int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
3305 int ret;
3306 if (machine_kernel_irqchip_split(ms)) {
3307 ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24); /* 24 IOAPIC pins */
3308 if (ret) {
3309 error_report("Could not enable split irqchip mode: %s",
3310 strerror(-ret));
3311 exit(1);
3312 } else {
3313 DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
3314 kvm_split_irqchip = true;
3315 return 1;
3317 } else {
3318 return 0;
3322 /* Classic KVM device assignment interface. Will remain x86 only. */
3323 int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
3324 uint32_t flags, uint32_t *dev_id)
3326 struct kvm_assigned_pci_dev dev_data = {
3327 .segnr = dev_addr->domain,
3328 .busnr = dev_addr->bus,
3329 .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
3330 .flags = flags,
3332 int ret;
3334 dev_data.assigned_dev_id =
3335 (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
3337 ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
3338 if (ret < 0) {
3339 return ret;
3342 *dev_id = dev_data.assigned_dev_id;
3344 return 0;
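/*
 * The assigned_dev_id packing above is the usual 32-bit BDF encoding.
 * Example with an illustrative host address 0000:03:05.2:
 *     devfn = PCI_DEVFN(5, 2) = (5 << 3) | 2 = 0x2a
 *     assigned_dev_id = (0x0000 << 16) | (0x03 << 8) | 0x2a = 0x032a
 */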
3347 int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
3349 struct kvm_assigned_pci_dev dev_data = {
3350 .assigned_dev_id = dev_id,
3353 return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
3356 static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
3357 uint32_t irq_type, uint32_t guest_irq)
3359 struct kvm_assigned_irq assigned_irq = {
3360 .assigned_dev_id = dev_id,
3361 .guest_irq = guest_irq,
3362 .flags = irq_type,
3365 if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
3366 return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
3367 } else {
3368 return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
3372 int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
3373 uint32_t guest_irq)
3375 uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
3376 (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
3378 return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
3381 int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
3383 struct kvm_assigned_pci_dev dev_data = {
3384 .assigned_dev_id = dev_id,
3385 .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
3388 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
3391 static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
3392 uint32_t type)
3394 struct kvm_assigned_irq assigned_irq = {
3395 .assigned_dev_id = dev_id,
3396 .flags = type,
3399 return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
3402 int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
3404 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
3405 (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
3408 int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
3410 return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
3411 KVM_DEV_IRQ_GUEST_MSI, virq);
3414 int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
3416 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
3417 KVM_DEV_IRQ_HOST_MSI);
3420 bool kvm_device_msix_supported(KVMState *s)
3422 /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
3423 * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
3424 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
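/*
 * Why -EFAULT implies support: with a NULL argument the kernel only
 * gets as far as faulting on the ioctl payload copy if it recognizes
 * the ioctl in the first place; an unknown ioctl fails with a
 * different errno (typically -ENOTTY). A heuristic, not a documented
 * contract.
 */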
3427 int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
3428 uint32_t nr_vectors)
3430 struct kvm_assigned_msix_nr msix_nr = {
3431 .assigned_dev_id = dev_id,
3432 .entry_nr = nr_vectors,
3435 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
3438 int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
3439 int virq)
3441 struct kvm_assigned_msix_entry msix_entry = {
3442 .assigned_dev_id = dev_id,
3443 .gsi = virq,
3444 .entry = vector,
3447 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
3450 int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
3452 return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
3453 KVM_DEV_IRQ_GUEST_MSIX, 0);
3456 int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
3458 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
3459 KVM_DEV_IRQ_HOST_MSIX);
3462 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
3463 uint64_t address, uint32_t data, PCIDevice *dev)
3465 X86IOMMUState *iommu = x86_iommu_get_default();
3467 if (iommu) {
3468 int ret;
3469 MSIMessage src, dst;
3470 X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);
3472 src.address = route->u.msi.address_hi;
3473 src.address <<= VTD_MSI_ADDR_HI_SHIFT;
3474 src.address |= route->u.msi.address_lo;
3475 src.data = route->u.msi.data;
3477 ret = class->int_remap(iommu, &src, &dst, dev ?
3478 pci_requester_id(dev) :
3479 X86_IOMMU_SID_INVALID);
3480 if (ret) {
3481 trace_kvm_x86_fixup_msi_error(route->gsi);
3482 return 1;
3485 route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
3486 route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
3487 route->u.msi.data = dst.data;
3490 return 0;
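/*
 * Sketch of the address split used above, assuming
 * VTD_MSI_ADDR_HI_SHIFT == 32: a route stores the 64-bit MSI address
 * as two 32-bit halves, so
 *     src.address == ((uint64_t)route->u.msi.address_hi << 32)
 *                    | route->u.msi.address_lo;
 * and the remapped dst.address is split back into the two halves the
 * same way.
 */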
3493 typedef struct MSIRouteEntry MSIRouteEntry;
3495 struct MSIRouteEntry {
3496 PCIDevice *dev; /* Device pointer */
3497 int vector; /* MSI/MSIX vector index */
3498 int virq; /* Virtual IRQ index */
3499 QLIST_ENTRY(MSIRouteEntry) list;
3502 /* List of used GSI routes */
3503 static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
3504 QLIST_HEAD_INITIALIZER(msi_route_list);
3506 static void kvm_update_msi_routes_all(void *private, bool global,
3507 uint32_t index, uint32_t mask)
3509 int cnt = 0;
3510 MSIRouteEntry *entry;
3511 MSIMessage msg;
3512 PCIDevice *dev;
3514 /* TODO: explicit route update */
3515 QLIST_FOREACH(entry, &msi_route_list, list) {
3516 cnt++;
3517 dev = entry->dev;
3518 if (!msix_enabled(dev) && !msi_enabled(dev)) {
3519 continue;
3521 msg = pci_get_msi_message(dev, entry->vector);
3522 kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
3524 kvm_irqchip_commit_routes(kvm_state);
3525 trace_kvm_x86_update_msi_routes(cnt);
3528 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
3529 int vector, PCIDevice *dev)
3531 static bool notify_list_inited = false;
3532 MSIRouteEntry *entry;
3534 if (!dev) {
3535 /* These are (possibly) IOAPIC routes, which are only used in
3536 * split kernel irqchip mode; we only keep track of routes that
3537 * belong to PCI devices here. */
3538 return 0;
3541 entry = g_new0(MSIRouteEntry, 1);
3542 entry->dev = dev;
3543 entry->vector = vector;
3544 entry->virq = route->gsi;
3545 QLIST_INSERT_HEAD(&msi_route_list, entry, list);
3547 trace_kvm_x86_add_msi_route(route->gsi);
3549 if (!notify_list_inited) {
3550 /* The first time we add a route, register ourselves in the
3551 * IOMMU's IEC notifier list if needed. */
3552 X86IOMMUState *iommu = x86_iommu_get_default();
3553 if (iommu) {
3554 x86_iommu_iec_register_notifier(iommu,
3555 kvm_update_msi_routes_all,
3556 NULL);
3558 notify_list_inited = true;
3560 return 0;
3563 int kvm_arch_release_virq_post(int virq)
3565 MSIRouteEntry *entry, *next;
3566 QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
3567 if (entry->virq == virq) {
3568 trace_kvm_x86_remove_msi_route(virq);
3569 QLIST_REMOVE(entry, list);
3570 break;
3573 return 0;
3576 int kvm_arch_msi_data_to_gsi(uint32_t data)
3578 abort();
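/*
 * kvm_arch_msi_data_to_gsi() is only meaningful on targets where the
 * GSI can be recovered from the MSI data payload; on x86, MSIs are
 * delivered through the routing table instead, so reaching this
 * function indicates a programming error -- hence the abort().
 */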