i386/kvm: add support for Hyper-V IPI send
target/i386/kvm.c (blob 4e62b5c39b804852accf494c5fae5ef2463e3f45)
1 /*
2 * QEMU KVM support
4 * Copyright (C) 2006-2008 Qumranet Technologies
5 * Copyright IBM, Corp. 2008
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
15 #include "qemu/osdep.h"
16 #include "qapi/error.h"
17 #include <sys/ioctl.h>
18 #include <sys/utsname.h>
20 #include <linux/kvm.h>
21 #include "standard-headers/asm-x86/kvm_para.h"
23 #include "qemu-common.h"
24 #include "cpu.h"
25 #include "sysemu/sysemu.h"
26 #include "sysemu/hw_accel.h"
27 #include "sysemu/kvm_int.h"
28 #include "kvm_i386.h"
29 #include "hyperv.h"
30 #include "hyperv-proto.h"
32 #include "exec/gdbstub.h"
33 #include "qemu/host-utils.h"
34 #include "qemu/config-file.h"
35 #include "qemu/error-report.h"
36 #include "hw/i386/pc.h"
37 #include "hw/i386/apic.h"
38 #include "hw/i386/apic_internal.h"
39 #include "hw/i386/apic-msidef.h"
40 #include "hw/i386/intel_iommu.h"
41 #include "hw/i386/x86-iommu.h"
43 #include "hw/pci/pci.h"
44 #include "hw/pci/msi.h"
45 #include "hw/pci/msix.h"
46 #include "migration/blocker.h"
47 #include "exec/memattrs.h"
48 #include "trace.h"
50 //#define DEBUG_KVM
52 #ifdef DEBUG_KVM
53 #define DPRINTF(fmt, ...) \
54 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
55 #else
56 #define DPRINTF(fmt, ...) \
57 do { } while (0)
58 #endif
60 #define MSR_KVM_WALL_CLOCK 0x11
61 #define MSR_KVM_SYSTEM_TIME 0x12
63 /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
64 * 255 kvm_msr_entry structs */
65 #define MSR_BUF_SIZE 4096
67 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
68 KVM_CAP_INFO(SET_TSS_ADDR),
69 KVM_CAP_INFO(EXT_CPUID),
70 KVM_CAP_INFO(MP_STATE),
71 KVM_CAP_LAST_INFO
74 static bool has_msr_star;
75 static bool has_msr_hsave_pa;
76 static bool has_msr_tsc_aux;
77 static bool has_msr_tsc_adjust;
78 static bool has_msr_tsc_deadline;
79 static bool has_msr_feature_control;
80 static bool has_msr_misc_enable;
81 static bool has_msr_smbase;
82 static bool has_msr_bndcfgs;
83 static int lm_capable_kernel;
84 static bool has_msr_hv_hypercall;
85 static bool has_msr_hv_crash;
86 static bool has_msr_hv_reset;
87 static bool has_msr_hv_vpindex;
88 static bool hv_vpindex_settable;
89 static bool has_msr_hv_runtime;
90 static bool has_msr_hv_synic;
91 static bool has_msr_hv_stimer;
92 static bool has_msr_hv_frequencies;
93 static bool has_msr_hv_reenlightenment;
94 static bool has_msr_xss;
95 static bool has_msr_spec_ctrl;
96 static bool has_msr_virt_ssbd;
97 static bool has_msr_smi_count;
99 static uint32_t has_architectural_pmu_version;
100 static uint32_t num_architectural_pmu_gp_counters;
101 static uint32_t num_architectural_pmu_fixed_counters;
103 static int has_xsave;
104 static int has_xcrs;
105 static int has_pit_state2;
107 static bool has_msr_mcg_ext_ctl;
109 static struct kvm_cpuid2 *cpuid_cache;
111 int kvm_has_pit_state2(void)
113 return has_pit_state2;
116 bool kvm_has_smm(void)
118 return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
121 bool kvm_has_adjust_clock_stable(void)
123 int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
125 return (ret == KVM_CLOCK_TSC_STABLE);
128 bool kvm_allows_irq0_override(void)
130 return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
133 static bool kvm_x2apic_api_set_flags(uint64_t flags)
135 KVMState *s = KVM_STATE(current_machine->accelerator);
137 return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
140 #define MEMORIZE(fn, _result) \
141 ({ \
142 static bool _memorized; \
143 \
144 if (_memorized) { \
145 return _result; \
146 } \
147 _memorized = true; \
148 _result = fn; \
149 })
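/*
 * MEMORIZE() is a GCC statement expression: the first call evaluates fn
 * once and stores it in the caller-provided _result; on later calls the
 * "return _result" path returns the cached value directly from the
 * enclosing function without re-evaluating fn.
 */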
151 static bool has_x2apic_api;
153 bool kvm_has_x2apic_api(void)
155 return has_x2apic_api;
158 bool kvm_enable_x2apic(void)
160 return MEMORIZE(
161 kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
162 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
163 has_x2apic_api);
166 bool kvm_hv_vpindex_settable(void)
168 return hv_vpindex_settable;
171 static int kvm_get_tsc(CPUState *cs)
173 X86CPU *cpu = X86_CPU(cs);
174 CPUX86State *env = &cpu->env;
175 struct {
176 struct kvm_msrs info;
177 struct kvm_msr_entry entries[1];
178 } msr_data;
179 int ret;
181 if (env->tsc_valid) {
182 return 0;
185 msr_data.info.nmsrs = 1;
186 msr_data.entries[0].index = MSR_IA32_TSC;
187 env->tsc_valid = !runstate_is_running();
189 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
190 if (ret < 0) {
191 return ret;
194 assert(ret == 1);
195 env->tsc = msr_data.entries[0].data;
196 return 0;
199 static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
201 kvm_get_tsc(cpu);
204 void kvm_synchronize_all_tsc(void)
206 CPUState *cpu;
208 if (kvm_enabled()) {
209 CPU_FOREACH(cpu) {
210 run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
215 static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
217 struct kvm_cpuid2 *cpuid;
218 int r, size;
220 size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
221 cpuid = g_malloc0(size);
222 cpuid->nent = max;
223 r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
224 if (r == 0 && cpuid->nent >= max) {
225 r = -E2BIG;
227 if (r < 0) {
228 if (r == -E2BIG) {
229 g_free(cpuid);
230 return NULL;
231 } else {
232 fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
233 strerror(-r));
234 exit(1);
237 return cpuid;
240 /* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
241 * for all entries.
243 static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
245 struct kvm_cpuid2 *cpuid;
246 int max = 1;
248 if (cpuid_cache != NULL) {
249 return cpuid_cache;
251 while ((cpuid = try_get_cpuid(s, max)) == NULL) {
252 max *= 2;
254 cpuid_cache = cpuid;
255 return cpuid;
258 static const struct kvm_para_features {
259 int cap;
260 int feature;
261 } para_features[] = {
262 { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
263 { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
264 { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
265 { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
268 static int get_para_features(KVMState *s)
270 int i, features = 0;
272 for (i = 0; i < ARRAY_SIZE(para_features); i++) {
273 if (kvm_check_extension(s, para_features[i].cap)) {
274 features |= (1 << para_features[i].feature);
278 return features;
281 static bool host_tsx_blacklisted(void)
283 int family, model, stepping;
284 char vendor[CPUID_VENDOR_SZ + 1];
286 host_vendor_fms(vendor, &family, &model, &stepping);
288 /* Check if we are running on a Haswell host known to have broken TSX */
289 return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
290 (family == 6) &&
291 ((model == 63 && stepping < 4) ||
292 model == 60 || model == 69 || model == 70);
295 /* Returns the value for a specific register on the cpuid entry
297 static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
299 uint32_t ret = 0;
300 switch (reg) {
301 case R_EAX:
302 ret = entry->eax;
303 break;
304 case R_EBX:
305 ret = entry->ebx;
306 break;
307 case R_ECX:
308 ret = entry->ecx;
309 break;
310 case R_EDX:
311 ret = entry->edx;
312 break;
314 return ret;
317 /* Find matching entry for function/index on kvm_cpuid2 struct
319 static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
320 uint32_t function,
321 uint32_t index)
323 int i;
324 for (i = 0; i < cpuid->nent; ++i) {
325 if (cpuid->entries[i].function == function &&
326 cpuid->entries[i].index == index) {
327 return &cpuid->entries[i];
330 /* not found: */
331 return NULL;
334 uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
335 uint32_t index, int reg)
337 struct kvm_cpuid2 *cpuid;
338 uint32_t ret = 0;
339 uint32_t cpuid_1_edx;
340 bool found = false;
342 cpuid = get_supported_cpuid(s);
344 struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
345 if (entry) {
346 found = true;
347 ret = cpuid_entry_get_reg(entry, reg);
350 /* Fixups for the data returned by KVM, below */
352 if (function == 1 && reg == R_EDX) {
353 /* KVM before 2.6.30 misreports the following features */
354 ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
355 } else if (function == 1 && reg == R_ECX) {
356 /* We can set the hypervisor flag, even if KVM does not return it on
357 * GET_SUPPORTED_CPUID
359 ret |= CPUID_EXT_HYPERVISOR;
360 /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
361 * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
362 * and the irqchip is in the kernel.
364 if (kvm_irqchip_in_kernel() &&
365 kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
366 ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
369 /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
370 * without the in-kernel irqchip
372 if (!kvm_irqchip_in_kernel()) {
373 ret &= ~CPUID_EXT_X2APIC;
376 if (enable_cpu_pm) {
377 int disable_exits = kvm_check_extension(s,
378 KVM_CAP_X86_DISABLE_EXITS);
380 if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
381 ret |= CPUID_EXT_MONITOR;
384 } else if (function == 6 && reg == R_EAX) {
385 ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
386 } else if (function == 7 && index == 0 && reg == R_EBX) {
387 if (host_tsx_blacklisted()) {
388 ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
390 } else if (function == 0x80000001 && reg == R_ECX) {
392 * It's safe to enable TOPOEXT even if it's not returned by
393 * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows
394 * us to keep CPU models including TOPOEXT runnable on older kernels.
396 ret |= CPUID_EXT3_TOPOEXT;
397 } else if (function == 0x80000001 && reg == R_EDX) {
398 /* On Intel, kvm returns cpuid according to the Intel spec,
399 * so add missing bits according to the AMD spec:
401 cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
402 ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
403 } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
404 /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
405 * be enabled without the in-kernel irqchip
407 if (!kvm_irqchip_in_kernel()) {
408 ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
410 } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
411 ret |= 1U << KVM_HINTS_REALTIME;
412 found = true;
415 /* fallback for older kernels */
416 if ((function == KVM_CPUID_FEATURES) && !found) {
417 ret = get_para_features(s);
420 return ret;
423 typedef struct HWPoisonPage {
424 ram_addr_t ram_addr;
425 QLIST_ENTRY(HWPoisonPage) list;
426 } HWPoisonPage;
428 static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
429 QLIST_HEAD_INITIALIZER(hwpoison_page_list);
431 static void kvm_unpoison_all(void *param)
433 HWPoisonPage *page, *next_page;
435 QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
436 QLIST_REMOVE(page, list);
437 qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
438 g_free(page);
442 static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
444 HWPoisonPage *page;
446 QLIST_FOREACH(page, &hwpoison_page_list, list) {
447 if (page->ram_addr == ram_addr) {
448 return;
451 page = g_new(HWPoisonPage, 1);
452 page->ram_addr = ram_addr;
453 QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
456 static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
457 int *max_banks)
459 int r;
461 r = kvm_check_extension(s, KVM_CAP_MCE);
462 if (r > 0) {
463 *max_banks = r;
464 return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
466 return -ENOSYS;
469 static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
471 CPUState *cs = CPU(cpu);
472 CPUX86State *env = &cpu->env;
473 uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
474 MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
475 uint64_t mcg_status = MCG_STATUS_MCIP;
476 int flags = 0;
478 if (code == BUS_MCEERR_AR) {
479 status |= MCI_STATUS_AR | 0x134;
480 mcg_status |= MCG_STATUS_EIPV;
481 } else {
482 status |= 0xc0;
483 mcg_status |= MCG_STATUS_RIPV;
486 flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
487 /* We need to read the value of MSR_EXT_MCG_CTL that was set by the
488 * guest kernel back into env->mcg_ext_ctl.
490 cpu_synchronize_state(cs);
491 if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
492 mcg_status |= MCG_STATUS_LMCE;
493 flags = 0;
496 cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
497 (MCM_ADDR_PHYS << 6) | 0xc, flags);
500 static void hardware_memory_error(void)
502 fprintf(stderr, "Hardware memory error!\n");
503 exit(1);
506 void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
508 X86CPU *cpu = X86_CPU(c);
509 CPUX86State *env = &cpu->env;
510 ram_addr_t ram_addr;
511 hwaddr paddr;
513 /* If we get an action required MCE, it has been injected by KVM
514 * while the VM was running. An action optional MCE instead should
515 * be coming from the main thread, which qemu_init_sigbus identifies
516 * as the "early kill" thread.
518 assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
520 if ((env->mcg_cap & MCG_SER_P) && addr) {
521 ram_addr = qemu_ram_addr_from_host(addr);
522 if (ram_addr != RAM_ADDR_INVALID &&
523 kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
524 kvm_hwpoison_page_add(ram_addr);
525 kvm_mce_inject(cpu, paddr, code);
526 return;
529 fprintf(stderr, "Hardware memory error for memory used by "
530 "QEMU itself instead of guest system!\n");
533 if (code == BUS_MCEERR_AR) {
534 hardware_memory_error();
537 /* Hope we are lucky for AO MCE */
540 static int kvm_inject_mce_oldstyle(X86CPU *cpu)
542 CPUX86State *env = &cpu->env;
544 if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
545 unsigned int bank, bank_num = env->mcg_cap & 0xff;
546 struct kvm_x86_mce mce;
548 env->exception_injected = -1;
551 * There must be at least one bank in use if an MCE is pending.
552 * Find it and use its values for the event injection.
554 for (bank = 0; bank < bank_num; bank++) {
555 if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
556 break;
559 assert(bank < bank_num);
561 mce.bank = bank;
562 mce.status = env->mce_banks[bank * 4 + 1];
563 mce.mcg_status = env->mcg_status;
564 mce.addr = env->mce_banks[bank * 4 + 2];
565 mce.misc = env->mce_banks[bank * 4 + 3];
567 return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
569 return 0;
572 static void cpu_update_state(void *opaque, int running, RunState state)
574 CPUX86State *env = opaque;
576 if (running) {
577 env->tsc_valid = false;
581 unsigned long kvm_arch_vcpu_id(CPUState *cs)
583 X86CPU *cpu = X86_CPU(cs);
584 return cpu->apic_id;
587 #ifndef KVM_CPUID_SIGNATURE_NEXT
588 #define KVM_CPUID_SIGNATURE_NEXT 0x40000100
589 #endif
591 static bool hyperv_hypercall_available(X86CPU *cpu)
593 return cpu->hyperv_vapic ||
594 (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
597 static bool hyperv_enabled(X86CPU *cpu)
599 CPUState *cs = CPU(cpu);
600 return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
601 (hyperv_hypercall_available(cpu) ||
602 cpu->hyperv_time ||
603 cpu->hyperv_relaxed_timing ||
604 cpu->hyperv_crash ||
605 cpu->hyperv_reset ||
606 cpu->hyperv_vpindex ||
607 cpu->hyperv_runtime ||
608 cpu->hyperv_synic ||
609 cpu->hyperv_stimer ||
610 cpu->hyperv_reenlightenment ||
611 cpu->hyperv_tlbflush ||
612 cpu->hyperv_ipi);
615 static int kvm_arch_set_tsc_khz(CPUState *cs)
617 X86CPU *cpu = X86_CPU(cs);
618 CPUX86State *env = &cpu->env;
619 int r;
621 if (!env->tsc_khz) {
622 return 0;
625 r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
626 kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
627 -ENOTSUP;
628 if (r < 0) {
629 /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
630 * TSC frequency doesn't match the one we want.
632 int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
633 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
634 -ENOTSUP;
635 if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
636 warn_report("TSC frequency mismatch between "
637 "VM (%" PRId64 " kHz) and host (%d kHz), "
638 "and TSC scaling unavailable",
639 env->tsc_khz, cur_freq);
640 return r;
644 return 0;
647 static bool tsc_is_stable_and_known(CPUX86State *env)
649 if (!env->tsc_khz) {
650 return false;
652 return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
653 || env->user_tsc_khz;
656 static int hyperv_handle_properties(CPUState *cs)
658 X86CPU *cpu = X86_CPU(cs);
659 CPUX86State *env = &cpu->env;
661 if (cpu->hyperv_relaxed_timing) {
662 env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
664 if (cpu->hyperv_vapic) {
665 env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
666 env->features[FEAT_HYPERV_EAX] |= HV_APIC_ACCESS_AVAILABLE;
668 if (cpu->hyperv_time) {
669 if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
670 fprintf(stderr, "Hyper-V clocksources "
671 "(requested by 'hv-time' cpu flag) "
672 "are not supported by kernel\n");
673 return -ENOSYS;
675 env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
676 env->features[FEAT_HYPERV_EAX] |= HV_TIME_REF_COUNT_AVAILABLE;
677 env->features[FEAT_HYPERV_EAX] |= HV_REFERENCE_TSC_AVAILABLE;
679 if (cpu->hyperv_frequencies) {
680 if (!has_msr_hv_frequencies) {
681 fprintf(stderr, "Hyper-V frequency MSRs "
682 "(requested by 'hv-frequencies' cpu flag) "
683 "are not supported by kernel\n");
684 return -ENOSYS;
686 env->features[FEAT_HYPERV_EAX] |= HV_ACCESS_FREQUENCY_MSRS;
687 env->features[FEAT_HYPERV_EDX] |= HV_FREQUENCY_MSRS_AVAILABLE;
689 if (cpu->hyperv_crash) {
690 if (!has_msr_hv_crash) {
691 fprintf(stderr, "Hyper-V crash MSRs "
692 "(requested by 'hv-crash' cpu flag) "
693 "are not supported by kernel\n");
694 return -ENOSYS;
696 env->features[FEAT_HYPERV_EDX] |= HV_GUEST_CRASH_MSR_AVAILABLE;
698 if (cpu->hyperv_reenlightenment) {
699 if (!has_msr_hv_reenlightenment) {
700 fprintf(stderr,
701 "Hyper-V Reenlightenment MSRs "
702 "(requested by 'hv-reenlightenment' cpu flag) "
703 "are not supported by kernel\n");
704 return -ENOSYS;
706 env->features[FEAT_HYPERV_EAX] |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
708 env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
709 if (cpu->hyperv_reset) {
710 if (!has_msr_hv_reset) {
711 fprintf(stderr, "Hyper-V reset MSR "
712 "(requested by 'hv-reset' cpu flag) "
713 "is not supported by kernel\n");
714 return -ENOSYS;
716 env->features[FEAT_HYPERV_EAX] |= HV_RESET_AVAILABLE;
718 if (cpu->hyperv_vpindex) {
719 if (!has_msr_hv_vpindex) {
720 fprintf(stderr, "Hyper-V VP_INDEX MSR "
721 "(requested by 'hv-vpindex' cpu flag) "
722 "is not supported by kernel\n");
723 return -ENOSYS;
725 env->features[FEAT_HYPERV_EAX] |= HV_VP_INDEX_AVAILABLE;
727 if (cpu->hyperv_runtime) {
728 if (!has_msr_hv_runtime) {
729 fprintf(stderr, "Hyper-V VP_RUNTIME MSR "
730 "(requested by 'hv-runtime' cpu flag) "
731 "is not supported by kernel\n");
732 return -ENOSYS;
734 env->features[FEAT_HYPERV_EAX] |= HV_VP_RUNTIME_AVAILABLE;
736 if (cpu->hyperv_synic) {
737 if (!has_msr_hv_synic ||
738 kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
739 fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
740 return -ENOSYS;
743 env->features[FEAT_HYPERV_EAX] |= HV_SYNIC_AVAILABLE;
745 if (cpu->hyperv_stimer) {
746 if (!has_msr_hv_stimer) {
747 fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
748 return -ENOSYS;
750 env->features[FEAT_HYPERV_EAX] |= HV_SYNTIMERS_AVAILABLE;
752 return 0;
755 static int hyperv_init_vcpu(X86CPU *cpu)
757 if (cpu->hyperv_vpindex && !hv_vpindex_settable) {
759 * the kernel doesn't support setting vp_index; assert that its value
760 * is in sync
762 int ret;
763 struct {
764 struct kvm_msrs info;
765 struct kvm_msr_entry entries[1];
766 } msr_data = {
767 .info.nmsrs = 1,
768 .entries[0].index = HV_X64_MSR_VP_INDEX,
771 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
772 if (ret < 0) {
773 return ret;
775 assert(ret == 1);
777 if (msr_data.entries[0].data != hyperv_vp_index(cpu)) {
778 error_report("kernel's vp_index != QEMU's vp_index");
779 return -ENXIO;
783 return 0;
786 static Error *invtsc_mig_blocker;
788 #define KVM_MAX_CPUID_ENTRIES 100
790 int kvm_arch_init_vcpu(CPUState *cs)
792 struct {
793 struct kvm_cpuid2 cpuid;
794 struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
795 } QEMU_PACKED cpuid_data;
796 X86CPU *cpu = X86_CPU(cs);
797 CPUX86State *env = &cpu->env;
798 uint32_t limit, i, j, cpuid_i;
799 uint32_t unused;
800 struct kvm_cpuid_entry2 *c;
801 uint32_t signature[3];
802 int kvm_base = KVM_CPUID_SIGNATURE;
803 int r;
804 Error *local_err = NULL;
806 memset(&cpuid_data, 0, sizeof(cpuid_data));
808 cpuid_i = 0;
810 r = kvm_arch_set_tsc_khz(cs);
811 if (r < 0) {
812 goto fail;
815 /* The vcpu's TSC frequency is either specified by the user or, failing
816 * that, follows the value used by KVM. In the latter case, we query it
817 * from KVM and record it in env->tsc_khz, so that the vcpu's TSC
818 * frequency can be migrated later via this field.
820 if (!env->tsc_khz) {
821 r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
822 kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
823 -ENOTSUP;
824 if (r > 0) {
825 env->tsc_khz = r;
829 /* Paravirtualization CPUIDs */
830 if (hyperv_enabled(cpu)) {
831 c = &cpuid_data.entries[cpuid_i++];
832 c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
833 if (!cpu->hyperv_vendor_id) {
834 memcpy(signature, "Microsoft Hv", 12);
835 } else {
836 size_t len = strlen(cpu->hyperv_vendor_id);
838 if (len > 12) {
839 error_report("hv-vendor-id truncated to 12 characters");
840 len = 12;
842 memset(signature, 0, 12);
843 memcpy(signature, cpu->hyperv_vendor_id, len);
845 c->eax = HV_CPUID_MIN;
846 c->ebx = signature[0];
847 c->ecx = signature[1];
848 c->edx = signature[2];
850 c = &cpuid_data.entries[cpuid_i++];
851 c->function = HV_CPUID_INTERFACE;
852 memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
853 c->eax = signature[0];
854 c->ebx = 0;
855 c->ecx = 0;
856 c->edx = 0;
858 c = &cpuid_data.entries[cpuid_i++];
859 c->function = HV_CPUID_VERSION;
860 c->eax = 0x00001bbc;
861 c->ebx = 0x00060001;
863 c = &cpuid_data.entries[cpuid_i++];
864 c->function = HV_CPUID_FEATURES;
865 r = hyperv_handle_properties(cs);
866 if (r) {
867 return r;
869 c->eax = env->features[FEAT_HYPERV_EAX];
870 c->ebx = env->features[FEAT_HYPERV_EBX];
871 c->edx = env->features[FEAT_HYPERV_EDX];
873 c = &cpuid_data.entries[cpuid_i++];
874 c->function = HV_CPUID_ENLIGHTMENT_INFO;
875 if (cpu->hyperv_relaxed_timing) {
876 c->eax |= HV_RELAXED_TIMING_RECOMMENDED;
878 if (cpu->hyperv_vapic) {
879 c->eax |= HV_APIC_ACCESS_RECOMMENDED;
881 if (cpu->hyperv_tlbflush) {
882 if (kvm_check_extension(cs->kvm_state,
883 KVM_CAP_HYPERV_TLBFLUSH) <= 0) {
884 fprintf(stderr, "Hyper-V TLB flush support "
885 "(requested by 'hv-tlbflush' cpu flag) "
886 " is not supported by kernel\n");
887 return -ENOSYS;
889 c->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
890 c->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
892 if (cpu->hyperv_ipi) {
893 if (kvm_check_extension(cs->kvm_state,
894 KVM_CAP_HYPERV_SEND_IPI) <= 0) {
895 fprintf(stderr, "Hyper-V IPI send support "
896 "(requested by 'hv-ipi' cpu flag) "
897 " is not supported by kernel\n");
898 return -ENOSYS;
900 c->eax |= HV_CLUSTER_IPI_RECOMMENDED;
901 c->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
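/*
 * The two bits set above recommend that the guest use the
 * HvCallSendSyntheticClusterIpi(Ex) hypercalls and the extended
 * (sparse) processor-mask format; KVM implements these when it
 * advertises KVM_CAP_HYPERV_SEND_IPI.
 */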
904 c->ebx = cpu->hyperv_spinlock_attempts;
906 c = &cpuid_data.entries[cpuid_i++];
907 c->function = HV_CPUID_IMPLEMENT_LIMITS;
909 c->eax = cpu->hv_max_vps;
910 c->ebx = 0x40;
912 kvm_base = KVM_CPUID_SIGNATURE_NEXT;
913 has_msr_hv_hypercall = true;
916 if (cpu->expose_kvm) {
917 memcpy(signature, "KVMKVMKVM\0\0\0", 12);
918 c = &cpuid_data.entries[cpuid_i++];
919 c->function = KVM_CPUID_SIGNATURE | kvm_base;
920 c->eax = KVM_CPUID_FEATURES | kvm_base;
921 c->ebx = signature[0];
922 c->ecx = signature[1];
923 c->edx = signature[2];
925 c = &cpuid_data.entries[cpuid_i++];
926 c->function = KVM_CPUID_FEATURES | kvm_base;
927 c->eax = env->features[FEAT_KVM];
928 c->edx = env->features[FEAT_KVM_HINTS];
931 cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
933 for (i = 0; i <= limit; i++) {
934 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
935 fprintf(stderr, "unsupported level value: 0x%x\n", limit);
936 abort();
938 c = &cpuid_data.entries[cpuid_i++];
940 switch (i) {
941 case 2: {
942 /* Keep reading function 2 till all the input is received */
943 int times;
945 c->function = i;
946 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
947 KVM_CPUID_FLAG_STATE_READ_NEXT;
948 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
949 times = c->eax & 0xff;
951 for (j = 1; j < times; ++j) {
952 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
953 fprintf(stderr, "cpuid_data is full, no space for "
954 "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
955 abort();
957 c = &cpuid_data.entries[cpuid_i++];
958 c->function = i;
959 c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
960 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
962 break;
964 case 4:
965 case 0xb:
966 case 0xd:
967 for (j = 0; ; j++) {
968 if (i == 0xd && j == 64) {
969 break;
971 c->function = i;
972 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
973 c->index = j;
974 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
976 if (i == 4 && c->eax == 0) {
977 break;
979 if (i == 0xb && !(c->ecx & 0xff00)) {
980 break;
982 if (i == 0xd && c->eax == 0) {
983 continue;
985 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
986 fprintf(stderr, "cpuid_data is full, no space for "
987 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
988 abort();
990 c = &cpuid_data.entries[cpuid_i++];
992 break;
993 case 0x14: {
994 uint32_t times;
996 c->function = i;
997 c->index = 0;
998 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
999 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1000 times = c->eax;
1002 for (j = 1; j <= times; ++j) {
1003 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1004 fprintf(stderr, "cpuid_data is full, no space for "
1005 "cpuid(eax:0x14,ecx:0x%x)\n", j);
1006 abort();
1008 c = &cpuid_data.entries[cpuid_i++];
1009 c->function = i;
1010 c->index = j;
1011 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1012 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1014 break;
1016 default:
1017 c->function = i;
1018 c->flags = 0;
1019 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1020 break;
1024 if (limit >= 0x0a) {
1025 uint32_t eax, edx;
1027 cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
1029 has_architectural_pmu_version = eax & 0xff;
1030 if (has_architectural_pmu_version > 0) {
1031 num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
1033 /* Shouldn't be more than 32, since that's the number of bits
1034 * available in EBX to tell us _which_ counters are available.
1035 * Play it safe.
1037 if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
1038 num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
1041 if (has_architectural_pmu_version > 1) {
1042 num_architectural_pmu_fixed_counters = edx & 0x1f;
1044 if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
1045 num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
1051 cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
1053 for (i = 0x80000000; i <= limit; i++) {
1054 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1055 fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
1056 abort();
1058 c = &cpuid_data.entries[cpuid_i++];
1060 switch (i) {
1061 case 0x8000001d:
1062 /* Query for all AMD cache information leaves */
1063 for (j = 0; ; j++) {
1064 c->function = i;
1065 c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1066 c->index = j;
1067 cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1069 if (c->eax == 0) {
1070 break;
1072 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1073 fprintf(stderr, "cpuid_data is full, no space for "
1074 "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1075 abort();
1077 c = &cpuid_data.entries[cpuid_i++];
1079 break;
1080 default:
1081 c->function = i;
1082 c->flags = 0;
1083 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1084 break;
1088 /* Call Centaur's CPUID instructions if they are supported. */
1089 if (env->cpuid_xlevel2 > 0) {
1090 cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
1092 for (i = 0xC0000000; i <= limit; i++) {
1093 if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1094 fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
1095 abort();
1097 c = &cpuid_data.entries[cpuid_i++];
1099 c->function = i;
1100 c->flags = 0;
1101 cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1105 cpuid_data.cpuid.nent = cpuid_i;
1107 if (((env->cpuid_version >> 8)&0xF) >= 6
1108 && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
1109 (CPUID_MCE | CPUID_MCA)
1110 && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
1111 uint64_t mcg_cap, unsupported_caps;
1112 int banks;
1113 int ret;
1115 ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
1116 if (ret < 0) {
1117 fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
1118 return ret;
1121 if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
1122 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
1123 (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
1124 return -ENOTSUP;
1127 unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
1128 if (unsupported_caps) {
1129 if (unsupported_caps & MCG_LMCE_P) {
1130 error_report("kvm: LMCE not supported");
1131 return -ENOTSUP;
1133 warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
1134 unsupported_caps);
1137 env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
1138 ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
1139 if (ret < 0) {
1140 fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
1141 return ret;
1145 qemu_add_vm_change_state_handler(cpu_update_state, env);
1147 c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
1148 if (c) {
1149 has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
1150 !!(c->ecx & CPUID_EXT_SMX);
1153 if (env->mcg_cap & MCG_LMCE_P) {
1154 has_msr_mcg_ext_ctl = has_msr_feature_control = true;
1157 if (!env->user_tsc_khz) {
1158 if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
1159 invtsc_mig_blocker == NULL) {
1160 /* for migration */
1161 error_setg(&invtsc_mig_blocker,
1162 "State blocked by non-migratable CPU device"
1163 " (invtsc flag)");
1164 r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
1165 if (local_err) {
1166 error_report_err(local_err);
1167 error_free(invtsc_mig_blocker);
1168 goto fail;
1170 /* for savevm */
1171 vmstate_x86_cpu.unmigratable = 1;
1175 if (cpu->vmware_cpuid_freq
1176 /* Guests depend on 0x40000000 to detect this feature, so only expose
1177 * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
1178 && cpu->expose_kvm
1179 && kvm_base == KVM_CPUID_SIGNATURE
1180 /* TSC clock must be stable and known for this feature. */
1181 && tsc_is_stable_and_known(env)) {
1183 c = &cpuid_data.entries[cpuid_i++];
1184 c->function = KVM_CPUID_SIGNATURE | 0x10;
1185 c->eax = env->tsc_khz;
1186 /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
1187 * APIC_BUS_CYCLE_NS */
1188 c->ebx = 1000000;
1189 c->ecx = c->edx = 0;
1191 c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
1192 c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
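/*
 * Leaf 0x40000010 above follows the VMware convention: EAX holds the TSC
 * frequency and EBX the virtual APIC bus frequency, both in kHz. The
 * signature leaf's EAX is raised so guests can see that this leaf exists.
 */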
1195 cpuid_data.cpuid.nent = cpuid_i;
1197 cpuid_data.cpuid.padding = 0;
1198 r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
1199 if (r) {
1200 goto fail;
1203 if (has_xsave) {
1204 env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
1206 cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
1208 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
1209 has_msr_tsc_aux = false;
1212 r = hyperv_init_vcpu(cpu);
1213 if (r) {
1214 goto fail;
1217 return 0;
1219 fail:
1220 migrate_del_blocker(invtsc_mig_blocker);
1221 return r;
1224 void kvm_arch_reset_vcpu(X86CPU *cpu)
1226 CPUX86State *env = &cpu->env;
1228 env->xcr0 = 1;
1229 if (kvm_irqchip_in_kernel()) {
1230 env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
1231 KVM_MP_STATE_UNINITIALIZED;
1232 } else {
1233 env->mp_state = KVM_MP_STATE_RUNNABLE;
1236 if (cpu->hyperv_synic) {
1237 int i;
1238 for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
1239 env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
1244 void kvm_arch_do_init_vcpu(X86CPU *cpu)
1246 CPUX86State *env = &cpu->env;
1248 /* APs get directly into wait-for-SIPI state. */
1249 if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
1250 env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
1254 static int kvm_get_supported_msrs(KVMState *s)
1256 static int kvm_supported_msrs;
1257 int ret = 0;
1259 /* first time */
1260 if (kvm_supported_msrs == 0) {
1261 struct kvm_msr_list msr_list, *kvm_msr_list;
1263 kvm_supported_msrs = -1;
1265 /* Obtain MSR list from KVM. These are the MSRs that we must
1266 * save/restore */
1267 msr_list.nmsrs = 0;
1268 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
1269 if (ret < 0 && ret != -E2BIG) {
1270 return ret;
1272 /* Old kernel modules had a bug and could write beyond the provided
1273 memory. Allocate at least a safe amount of 1K. */
1274 kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
1275 msr_list.nmsrs *
1276 sizeof(msr_list.indices[0])));
1278 kvm_msr_list->nmsrs = msr_list.nmsrs;
1279 ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
1280 if (ret >= 0) {
1281 int i;
1283 for (i = 0; i < kvm_msr_list->nmsrs; i++) {
1284 switch (kvm_msr_list->indices[i]) {
1285 case MSR_STAR:
1286 has_msr_star = true;
1287 break;
1288 case MSR_VM_HSAVE_PA:
1289 has_msr_hsave_pa = true;
1290 break;
1291 case MSR_TSC_AUX:
1292 has_msr_tsc_aux = true;
1293 break;
1294 case MSR_TSC_ADJUST:
1295 has_msr_tsc_adjust = true;
1296 break;
1297 case MSR_IA32_TSCDEADLINE:
1298 has_msr_tsc_deadline = true;
1299 break;
1300 case MSR_IA32_SMBASE:
1301 has_msr_smbase = true;
1302 break;
1303 case MSR_SMI_COUNT:
1304 has_msr_smi_count = true;
1305 break;
1306 case MSR_IA32_MISC_ENABLE:
1307 has_msr_misc_enable = true;
1308 break;
1309 case MSR_IA32_BNDCFGS:
1310 has_msr_bndcfgs = true;
1311 break;
1312 case MSR_IA32_XSS:
1313 has_msr_xss = true;
1314 break;
1315 case HV_X64_MSR_CRASH_CTL:
1316 has_msr_hv_crash = true;
1317 break;
1318 case HV_X64_MSR_RESET:
1319 has_msr_hv_reset = true;
1320 break;
1321 case HV_X64_MSR_VP_INDEX:
1322 has_msr_hv_vpindex = true;
1323 break;
1324 case HV_X64_MSR_VP_RUNTIME:
1325 has_msr_hv_runtime = true;
1326 break;
1327 case HV_X64_MSR_SCONTROL:
1328 has_msr_hv_synic = true;
1329 break;
1330 case HV_X64_MSR_STIMER0_CONFIG:
1331 has_msr_hv_stimer = true;
1332 break;
1333 case HV_X64_MSR_TSC_FREQUENCY:
1334 has_msr_hv_frequencies = true;
1335 break;
1336 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1337 has_msr_hv_reenlightenment = true;
1338 break;
1339 case MSR_IA32_SPEC_CTRL:
1340 has_msr_spec_ctrl = true;
1341 break;
1342 case MSR_VIRT_SSBD:
1343 has_msr_virt_ssbd = true;
1344 break;
1349 g_free(kvm_msr_list);
1352 return ret;
1355 static Notifier smram_machine_done;
1356 static KVMMemoryListener smram_listener;
1357 static AddressSpace smram_address_space;
1358 static MemoryRegion smram_as_root;
1359 static MemoryRegion smram_as_mem;
1361 static void register_smram_listener(Notifier *n, void *unused)
1363 MemoryRegion *smram =
1364 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
1366 /* Outer container... */
1367 memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
1368 memory_region_set_enabled(&smram_as_root, true);
1370 /* ... with two regions inside: normal system memory with low
1371 * priority, and...
1373 memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
1374 get_system_memory(), 0, ~0ull);
1375 memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
1376 memory_region_set_enabled(&smram_as_mem, true);
1378 if (smram) {
1379 /* ... SMRAM with higher priority */
1380 memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
1381 memory_region_set_enabled(smram, true);
1384 address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
1385 kvm_memory_listener_register(kvm_state, &smram_listener,
1386 &smram_address_space, 1);
1389 int kvm_arch_init(MachineState *ms, KVMState *s)
1391 uint64_t identity_base = 0xfffbc000;
1392 uint64_t shadow_mem;
1393 int ret;
1394 struct utsname utsname;
1396 has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
1397 has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
1398 has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
1400 hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
1402 ret = kvm_get_supported_msrs(s);
1403 if (ret < 0) {
1404 return ret;
1407 uname(&utsname);
1408 lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
1411 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
1412 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
1413 * Since these must be part of guest physical memory, we need to allocate
1414 * them, both by setting their start addresses in the kernel and by
1415 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
1417 * Older KVM versions may not support setting the identity map base. In
1418 * that case we need to stick with the default, i.e. a 256K maximum BIOS
1419 * size.
1421 if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
1422 /* Allows up to 16M BIOSes. */
1423 identity_base = 0xfeffc000;
1425 ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
1426 if (ret < 0) {
1427 return ret;
1431 /* Set TSS base one page after EPT identity map. */
1432 ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
1433 if (ret < 0) {
1434 return ret;
1437 /* Tell fw_cfg to notify the BIOS to reserve the range. */
1438 ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
1439 if (ret < 0) {
1440 fprintf(stderr, "e820_add_entry() table is full\n");
1441 return ret;
1443 qemu_register_reset(kvm_unpoison_all, NULL);
1445 shadow_mem = machine_kvm_shadow_mem(ms);
1446 if (shadow_mem != -1) {
1447 shadow_mem /= 4096;
1448 ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
1449 if (ret < 0) {
1450 return ret;
1454 if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
1455 object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE) &&
1456 pc_machine_is_smm_enabled(PC_MACHINE(ms))) {
1457 smram_machine_done.notify = register_smram_listener;
1458 qemu_add_machine_init_done_notifier(&smram_machine_done);
1461 if (enable_cpu_pm) {
1462 int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
1463 int ret;
1465 /* Work around for kernel header with a typo. TODO: fix header and drop. */
1466 #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
1467 #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
1468 #endif
1469 if (disable_exits) {
1470 disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
1471 KVM_X86_DISABLE_EXITS_HLT |
1472 KVM_X86_DISABLE_EXITS_PAUSE);
1475 ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
1476 disable_exits);
1477 if (ret < 0) {
1478 error_report("kvm: guest stopping CPU not supported: %s",
1479 strerror(-ret));
1483 return 0;
1486 static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
1488 lhs->selector = rhs->selector;
1489 lhs->base = rhs->base;
1490 lhs->limit = rhs->limit;
1491 lhs->type = 3;
1492 lhs->present = 1;
1493 lhs->dpl = 3;
1494 lhs->db = 0;
1495 lhs->s = 1;
1496 lhs->l = 0;
1497 lhs->g = 0;
1498 lhs->avl = 0;
1499 lhs->unusable = 0;
1502 static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
1504 unsigned flags = rhs->flags;
1505 lhs->selector = rhs->selector;
1506 lhs->base = rhs->base;
1507 lhs->limit = rhs->limit;
1508 lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
1509 lhs->present = (flags & DESC_P_MASK) != 0;
1510 lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
1511 lhs->db = (flags >> DESC_B_SHIFT) & 1;
1512 lhs->s = (flags & DESC_S_MASK) != 0;
1513 lhs->l = (flags >> DESC_L_SHIFT) & 1;
1514 lhs->g = (flags & DESC_G_MASK) != 0;
1515 lhs->avl = (flags & DESC_AVL_MASK) != 0;
1516 lhs->unusable = !lhs->present;
1517 lhs->padding = 0;
1520 static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
1522 lhs->selector = rhs->selector;
1523 lhs->base = rhs->base;
1524 lhs->limit = rhs->limit;
1525 lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
1526 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
1527 (rhs->dpl << DESC_DPL_SHIFT) |
1528 (rhs->db << DESC_B_SHIFT) |
1529 (rhs->s * DESC_S_MASK) |
1530 (rhs->l << DESC_L_SHIFT) |
1531 (rhs->g * DESC_G_MASK) |
1532 (rhs->avl * DESC_AVL_MASK);
1535 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
1537 if (set) {
1538 *kvm_reg = *qemu_reg;
1539 } else {
1540 *qemu_reg = *kvm_reg;
1544 static int kvm_getput_regs(X86CPU *cpu, int set)
1546 CPUX86State *env = &cpu->env;
1547 struct kvm_regs regs;
1548 int ret = 0;
1550 if (!set) {
1551 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
1552 if (ret < 0) {
1553 return ret;
1557 kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
1558 kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
1559 kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
1560 kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
1561 kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
1562 kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
1563 kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
1564 kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
1565 #ifdef TARGET_X86_64
1566 kvm_getput_reg(&regs.r8, &env->regs[8], set);
1567 kvm_getput_reg(&regs.r9, &env->regs[9], set);
1568 kvm_getput_reg(&regs.r10, &env->regs[10], set);
1569 kvm_getput_reg(&regs.r11, &env->regs[11], set);
1570 kvm_getput_reg(&regs.r12, &env->regs[12], set);
1571 kvm_getput_reg(&regs.r13, &env->regs[13], set);
1572 kvm_getput_reg(&regs.r14, &env->regs[14], set);
1573 kvm_getput_reg(&regs.r15, &env->regs[15], set);
1574 #endif
1576 kvm_getput_reg(&regs.rflags, &env->eflags, set);
1577 kvm_getput_reg(&regs.rip, &env->eip, set);
1579 if (set) {
1580 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
1583 return ret;
1586 static int kvm_put_fpu(X86CPU *cpu)
1588 CPUX86State *env = &cpu->env;
1589 struct kvm_fpu fpu;
1590 int i;
1592 memset(&fpu, 0, sizeof fpu);
1593 fpu.fsw = env->fpus & ~(7 << 11);
1594 fpu.fsw |= (env->fpstt & 7) << 11;
1595 fpu.fcw = env->fpuc;
1596 fpu.last_opcode = env->fpop;
1597 fpu.last_ip = env->fpip;
1598 fpu.last_dp = env->fpdp;
1599 for (i = 0; i < 8; ++i) {
1600 fpu.ftwx |= (!env->fptags[i]) << i;
1602 memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
1603 for (i = 0; i < CPU_NB_REGS; i++) {
1604 stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
1605 stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
1607 fpu.mxcsr = env->mxcsr;
1609 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
1612 #define XSAVE_FCW_FSW 0
1613 #define XSAVE_FTW_FOP 1
1614 #define XSAVE_CWD_RIP 2
1615 #define XSAVE_CWD_RDP 4
1616 #define XSAVE_MXCSR 6
1617 #define XSAVE_ST_SPACE 8
1618 #define XSAVE_XMM_SPACE 40
1619 #define XSAVE_XSTATE_BV 128
1620 #define XSAVE_YMMH_SPACE 144
1621 #define XSAVE_BNDREGS 240
1622 #define XSAVE_BNDCSR 256
1623 #define XSAVE_OPMASK 272
1624 #define XSAVE_ZMM_Hi256 288
1625 #define XSAVE_Hi16_ZMM 416
1626 #define XSAVE_PKRU 672
1628 #define XSAVE_BYTE_OFFSET(word_offset) \
1629 ((word_offset) * sizeof_field(struct kvm_xsave, region[0]))
1631 #define ASSERT_OFFSET(word_offset, field) \
1632 QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
1633 offsetof(X86XSaveArea, field))
1635 ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
1636 ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
1637 ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
1638 ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
1639 ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
1640 ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
1641 ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
1642 ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
1643 ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
1644 ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
1645 ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
1646 ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
1647 ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
1648 ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
1649 ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
1651 static int kvm_put_xsave(X86CPU *cpu)
1653 CPUX86State *env = &cpu->env;
1654 X86XSaveArea *xsave = env->xsave_buf;
1656 if (!has_xsave) {
1657 return kvm_put_fpu(cpu);
1659 x86_cpu_xsave_all_areas(cpu, xsave);
1661 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
1664 static int kvm_put_xcrs(X86CPU *cpu)
1666 CPUX86State *env = &cpu->env;
1667 struct kvm_xcrs xcrs = {};
1669 if (!has_xcrs) {
1670 return 0;
1673 xcrs.nr_xcrs = 1;
1674 xcrs.flags = 0;
1675 xcrs.xcrs[0].xcr = 0;
1676 xcrs.xcrs[0].value = env->xcr0;
1677 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
1680 static int kvm_put_sregs(X86CPU *cpu)
1682 CPUX86State *env = &cpu->env;
1683 struct kvm_sregs sregs;
1685 memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
1686 if (env->interrupt_injected >= 0) {
1687 sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
1688 (uint64_t)1 << (env->interrupt_injected % 64);
1691 if ((env->eflags & VM_MASK)) {
1692 set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
1693 set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
1694 set_v8086_seg(&sregs.es, &env->segs[R_ES]);
1695 set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
1696 set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
1697 set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
1698 } else {
1699 set_seg(&sregs.cs, &env->segs[R_CS]);
1700 set_seg(&sregs.ds, &env->segs[R_DS]);
1701 set_seg(&sregs.es, &env->segs[R_ES]);
1702 set_seg(&sregs.fs, &env->segs[R_FS]);
1703 set_seg(&sregs.gs, &env->segs[R_GS]);
1704 set_seg(&sregs.ss, &env->segs[R_SS]);
1707 set_seg(&sregs.tr, &env->tr);
1708 set_seg(&sregs.ldt, &env->ldt);
1710 sregs.idt.limit = env->idt.limit;
1711 sregs.idt.base = env->idt.base;
1712 memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
1713 sregs.gdt.limit = env->gdt.limit;
1714 sregs.gdt.base = env->gdt.base;
1715 memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
1717 sregs.cr0 = env->cr[0];
1718 sregs.cr2 = env->cr[2];
1719 sregs.cr3 = env->cr[3];
1720 sregs.cr4 = env->cr[4];
1722 sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
1723 sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
1725 sregs.efer = env->efer;
1727 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
1730 static void kvm_msr_buf_reset(X86CPU *cpu)
1732 memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
1735 static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
1737 struct kvm_msrs *msrs = cpu->kvm_msr_buf;
1738 void *limit = ((void *)msrs) + MSR_BUF_SIZE;
1739 struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
1741 assert((void *)(entry + 1) <= limit);
1743 entry->index = index;
1744 entry->reserved = 0;
1745 entry->data = value;
1746 msrs->nmsrs++;
1749 static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
1751 kvm_msr_buf_reset(cpu);
1752 kvm_msr_entry_add(cpu, index, value);
1754 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
1757 void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
1759 int ret;
1761 ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
1762 assert(ret == 1);
1765 static int kvm_put_tscdeadline_msr(X86CPU *cpu)
1767 CPUX86State *env = &cpu->env;
1768 int ret;
1770 if (!has_msr_tsc_deadline) {
1771 return 0;
1774 ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
1775 if (ret < 0) {
1776 return ret;
1779 assert(ret == 1);
1780 return 0;
1784 * Provide a separate write service for the feature control MSR in order to
1785 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
1786 * before writing any other state because forcibly leaving nested mode
1787 * invalidates the VCPU state.
1789 static int kvm_put_msr_feature_control(X86CPU *cpu)
1791 int ret;
1793 if (!has_msr_feature_control) {
1794 return 0;
1797 ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
1798 cpu->env.msr_ia32_feature_control);
1799 if (ret < 0) {
1800 return ret;
1803 assert(ret == 1);
1804 return 0;
1807 static int kvm_put_msrs(X86CPU *cpu, int level)
1809 CPUX86State *env = &cpu->env;
1810 int i;
1811 int ret;
1813 kvm_msr_buf_reset(cpu);
1815 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
1816 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
1817 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
1818 kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
1819 if (has_msr_star) {
1820 kvm_msr_entry_add(cpu, MSR_STAR, env->star);
1822 if (has_msr_hsave_pa) {
1823 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
1825 if (has_msr_tsc_aux) {
1826 kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
1828 if (has_msr_tsc_adjust) {
1829 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
1831 if (has_msr_misc_enable) {
1832 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
1833 env->msr_ia32_misc_enable);
1835 if (has_msr_smbase) {
1836 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
1838 if (has_msr_smi_count) {
1839 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
1841 if (has_msr_bndcfgs) {
1842 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
1844 if (has_msr_xss) {
1845 kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
1847 if (has_msr_spec_ctrl) {
1848 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
1850 if (has_msr_virt_ssbd) {
1851 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
1854 #ifdef TARGET_X86_64
1855 if (lm_capable_kernel) {
1856 kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
1857 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
1858 kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
1859 kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
1861 #endif
1864 * The following MSRs have side effects on the guest or are too heavy
1865 * for normal writeback. Limit them to reset or full state updates.
1867 if (level >= KVM_PUT_RESET_STATE) {
1868 kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
1869 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
1870 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
1871 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
1872 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
1874 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
1875 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
1877 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
1878 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
1880 if (has_architectural_pmu_version > 0) {
1881 if (has_architectural_pmu_version > 1) {
1882 /* Stop the counter. */
1883 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
1884 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
1887 /* Set the counter values. */
1888 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
1889 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
1890 env->msr_fixed_counters[i]);
1892 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
1893 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
1894 env->msr_gp_counters[i]);
1895 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
1896 env->msr_gp_evtsel[i]);
1898 if (has_architectural_pmu_version > 1) {
1899 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
1900 env->msr_global_status);
1901 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
1902 env->msr_global_ovf_ctrl);
1904 /* Now start the PMU. */
1905 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
1906 env->msr_fixed_ctr_ctrl);
1907 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
1908 env->msr_global_ctrl);
1912 * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
1913 * only sync them to KVM on the first cpu
1915 if (current_cpu == first_cpu) {
1916 if (has_msr_hv_hypercall) {
1917 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
1918 env->msr_hv_guest_os_id);
1919 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
1920 env->msr_hv_hypercall);
1922 if (cpu->hyperv_time) {
1923 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
1924 env->msr_hv_tsc);
1926 if (cpu->hyperv_reenlightenment) {
1927 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
1928 env->msr_hv_reenlightenment_control);
1929 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
1930 env->msr_hv_tsc_emulation_control);
1931 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
1932 env->msr_hv_tsc_emulation_status);
1935 if (cpu->hyperv_vapic) {
1936 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
1937 env->msr_hv_vapic);
1939 if (has_msr_hv_crash) {
1940 int j;
1942 for (j = 0; j < HV_CRASH_PARAMS; j++)
1943 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
1944 env->msr_hv_crash_params[j]);
1946 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
1948 if (has_msr_hv_runtime) {
1949 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
1951 if (cpu->hyperv_vpindex && hv_vpindex_settable) {
1952 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX, hyperv_vp_index(cpu));
1954 if (cpu->hyperv_synic) {
1955 int j;
1957 kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
1959 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
1960 env->msr_hv_synic_control);
1961 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
1962 env->msr_hv_synic_evt_page);
1963 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
1964 env->msr_hv_synic_msg_page);
1966 for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
1967 kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
1968 env->msr_hv_synic_sint[j]);
1971 if (has_msr_hv_stimer) {
1972 int j;
1974 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
1975 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
1976 env->msr_hv_stimer_config[j]);
1979 for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
1980 kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
1981 env->msr_hv_stimer_count[j]);
1984 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
1985 uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
1987 kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
1988 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
1989 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
1990 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
1991 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
1992 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
1993 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
1994 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
1995 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
1996 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
1997 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
1998 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
1999 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
2000 /* The CPU GPs if we write to a bit above the physical limit of
2001 * the host CPU (and KVM emulates that)
2003 uint64_t mask = env->mtrr_var[i].mask;
2004 mask &= phys_mask;
2006 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
2007 env->mtrr_var[i].base);
2008 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
2011 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
2012 int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
2013 0x14, 1, R_EAX) & 0x7;
2015 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
2016 env->msr_rtit_ctrl);
2017 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
2018 env->msr_rtit_status);
2019 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
2020 env->msr_rtit_output_base);
2021 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
2022 env->msr_rtit_output_mask);
2023 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
2024 env->msr_rtit_cr3_match);
2025 for (i = 0; i < addr_num; i++) {
2026 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
2027 env->msr_rtit_addrs[i]);
2031 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
2032 * kvm_put_msr_feature_control. */
2034 if (env->mcg_cap) {
2035 int i;
2037 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
2038 kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
2039 if (has_msr_mcg_ext_ctl) {
2040 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
2042 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
2043 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
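/*
 * KVM_SET_MSRS returns the number of MSRs written successfully (or a
 * negative errno); a short count below identifies the first MSR that
 * the kernel rejected.
 */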
2047 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2048 if (ret < 0) {
2049 return ret;
2052 if (ret < cpu->kvm_msr_buf->nmsrs) {
2053 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
2054 error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
2055 (uint32_t)e->index, (uint64_t)e->data);
2058 assert(ret == cpu->kvm_msr_buf->nmsrs);
2059 return 0;
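/* Fetch the legacy FPU/SSE state from KVM and unpack it into CPUX86State
 * (control/status/tag words, last opcode/IP/DP, ST and XMM registers,
 * MXCSR). */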
2063 static int kvm_get_fpu(X86CPU *cpu)
2065 CPUX86State *env = &cpu->env;
2066 struct kvm_fpu fpu;
2067 int i, ret;
2069 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
2070 if (ret < 0) {
2071 return ret;
2074 env->fpstt = (fpu.fsw >> 11) & 7;
2075 env->fpus = fpu.fsw;
2076 env->fpuc = fpu.fcw;
2077 env->fpop = fpu.last_opcode;
2078 env->fpip = fpu.last_ip;
2079 env->fpdp = fpu.last_dp;
2080 for (i = 0; i < 8; ++i) {
2081 env->fptags[i] = !((fpu.ftwx >> i) & 1);
2083 memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
2084 for (i = 0; i < CPU_NB_REGS; i++) {
2085 env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
2086 env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
2088 env->mxcsr = fpu.mxcsr;
2090 return 0;
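/* Read the extended state via KVM_GET_XSAVE and load it with
 * x86_cpu_xrstor_all_areas(); falls back to kvm_get_fpu() when the host
 * lacks XSAVE support. */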
2093 static int kvm_get_xsave(X86CPU *cpu)
2095 CPUX86State *env = &cpu->env;
2096 X86XSaveArea *xsave = env->xsave_buf;
2097 int ret;
2099 if (!has_xsave) {
2100 return kvm_get_fpu(cpu);
2103 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
2104 if (ret < 0) {
2105 return ret;
2107 x86_cpu_xrstor_all_areas(cpu, xsave);
2109 return 0;
2112 static int kvm_get_xcrs(X86CPU *cpu)
2114 CPUX86State *env = &cpu->env;
2115 int i, ret;
2116 struct kvm_xcrs xcrs;
2118 if (!has_xcrs) {
2119 return 0;
2122 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
2123 if (ret < 0) {
2124 return ret;
2127 for (i = 0; i < xcrs.nr_xcrs; i++) {
2128 /* Only support xcr0 now */
2129 if (xcrs.xcrs[i].xcr == 0) {
2130 env->xcr0 = xcrs.xcrs[i].value;
2131 break;
2134 return 0;
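/* Fetch the segment and system registers (segment descriptors, GDT/IDT,
 * control registers, EFER) plus the pending-interrupt bitmap from KVM. */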
2137 static int kvm_get_sregs(X86CPU *cpu)
2139 CPUX86State *env = &cpu->env;
2140 struct kvm_sregs sregs;
2141 int bit, i, ret;
2143 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
2144 if (ret < 0) {
2145 return ret;
2148 /* There can only be one pending IRQ set in the bitmap at a time, so try
2149 to find it and save its number instead (-1 for none). */
2150 env->interrupt_injected = -1;
2151 for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
2152 if (sregs.interrupt_bitmap[i]) {
2153 bit = ctz64(sregs.interrupt_bitmap[i]);
2154 env->interrupt_injected = i * 64 + bit;
2155 break;
2159 get_seg(&env->segs[R_CS], &sregs.cs);
2160 get_seg(&env->segs[R_DS], &sregs.ds);
2161 get_seg(&env->segs[R_ES], &sregs.es);
2162 get_seg(&env->segs[R_FS], &sregs.fs);
2163 get_seg(&env->segs[R_GS], &sregs.gs);
2164 get_seg(&env->segs[R_SS], &sregs.ss);
2166 get_seg(&env->tr, &sregs.tr);
2167 get_seg(&env->ldt, &sregs.ldt);
2169 env->idt.limit = sregs.idt.limit;
2170 env->idt.base = sregs.idt.base;
2171 env->gdt.limit = sregs.gdt.limit;
2172 env->gdt.base = sregs.gdt.base;
2174 env->cr[0] = sregs.cr0;
2175 env->cr[2] = sregs.cr2;
2176 env->cr[3] = sregs.cr3;
2177 env->cr[4] = sregs.cr4;
2179 env->efer = sregs.efer;
2181 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
2182 x86_update_hflags(env);
2184 return 0;
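/* Build the list of MSRs this vCPU exposes (mirroring kvm_put_msrs), read
 * them with a single KVM_GET_MSRS call and scatter the returned values
 * back into CPUX86State. */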
2187 static int kvm_get_msrs(X86CPU *cpu)
2189 CPUX86State *env = &cpu->env;
2190 struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
2191 int ret, i;
2192 uint64_t mtrr_top_bits;
2194 kvm_msr_buf_reset(cpu);
2196 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
2197 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
2198 kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
2199 kvm_msr_entry_add(cpu, MSR_PAT, 0);
2200 if (has_msr_star) {
2201 kvm_msr_entry_add(cpu, MSR_STAR, 0);
2203 if (has_msr_hsave_pa) {
2204 kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
2206 if (has_msr_tsc_aux) {
2207 kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
2209 if (has_msr_tsc_adjust) {
2210 kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
2212 if (has_msr_tsc_deadline) {
2213 kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
2215 if (has_msr_misc_enable) {
2216 kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
2218 if (has_msr_smbase) {
2219 kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
2221 if (has_msr_smi_count) {
2222 kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
2224 if (has_msr_feature_control) {
2225 kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
2227 if (has_msr_bndcfgs) {
2228 kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
2230 if (has_msr_xss) {
2231 kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
2233 if (has_msr_spec_ctrl) {
2234 kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
2236 if (has_msr_virt_ssbd) {
2237 kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
2239 if (!env->tsc_valid) {
2240 kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
2241 env->tsc_valid = !runstate_is_running();
2244 #ifdef TARGET_X86_64
2245 if (lm_capable_kernel) {
2246 kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
2247 kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
2248 kvm_msr_entry_add(cpu, MSR_FMASK, 0);
2249 kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
2251 #endif
2252 kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
2253 kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
2254 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
2255 kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
2257 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
2258 kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
2260 if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
2261 kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
2263 if (has_architectural_pmu_version > 0) {
2264 if (has_architectural_pmu_version > 1) {
2265 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
2266 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
2267 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
2268 kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
2270 for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
2271 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
2273 for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
2274 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
2275 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
2279 if (env->mcg_cap) {
2280 kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
2281 kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
2282 if (has_msr_mcg_ext_ctl) {
2283 kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
2285 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
2286 kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
2290 if (has_msr_hv_hypercall) {
2291 kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
2292 kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
2294 if (cpu->hyperv_vapic) {
2295 kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
2297 if (cpu->hyperv_time) {
2298 kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
2300 if (cpu->hyperv_reenlightenment) {
2301 kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
2302 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
2303 kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
2305 if (has_msr_hv_crash) {
2306 int j;
2308 for (j = 0; j < HV_CRASH_PARAMS; j++) {
2309 kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
2312 if (has_msr_hv_runtime) {
2313 kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
2315 if (cpu->hyperv_synic) {
2316 uint32_t msr;
2318 kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
2319 kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
2320 kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
2321 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
2322 kvm_msr_entry_add(cpu, msr, 0);
2325 if (has_msr_hv_stimer) {
2326 uint32_t msr;
2328 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
2329 msr++) {
2330 kvm_msr_entry_add(cpu, msr, 0);
2333 if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
2334 kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
2335 kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
2336 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
2337 kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
2338 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
2339 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
2340 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
2341 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
2342 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
2343 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
2344 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
2345 kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
2346 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
2347 kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
2348 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
2352 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
2353 int addr_num =
2354 kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
2356 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
2357 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
2358 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
2359 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
2360 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
2361 for (i = 0; i < addr_num; i++) {
2362 kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
2366 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
2367 if (ret < 0) {
2368 return ret;
2371 if (ret < cpu->kvm_msr_buf->nmsrs) {
2372 struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
2373 error_report("error: failed to get MSR 0x%" PRIx32,
2374 (uint32_t)e->index);
2377 assert(ret == cpu->kvm_msr_buf->nmsrs);
2379 * MTRR masks: Each mask consists of 5 parts
2380 * a 10..0: must be zero
2381 * b 11 : valid bit
2382 * c n-1..12: actual mask bits
2383 * d 51..n: reserved, must be zero
2384 * e 63..52: reserved, must be zero
2386 * 'n' is the number of physical bits supported by the CPU and is
2387 * apparently always <= 52. We know our 'n' but don't know what
2388 * the destination's 'n' is; it might be smaller, in which case
2389 * it masks (c) on loading. It might be larger, in which case
2390 * we fill 'd' so that d..c is consistent irrespective of the 'n'
2391 * we're migrating to.
2394 if (cpu->fill_mtrr_mask) {
2395 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
2396 assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
2397 mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
2398 } else {
2399 mtrr_top_bits = 0;
2402 for (i = 0; i < ret; i++) {
2403 uint32_t index = msrs[i].index;
2404 switch (index) {
2405 case MSR_IA32_SYSENTER_CS:
2406 env->sysenter_cs = msrs[i].data;
2407 break;
2408 case MSR_IA32_SYSENTER_ESP:
2409 env->sysenter_esp = msrs[i].data;
2410 break;
2411 case MSR_IA32_SYSENTER_EIP:
2412 env->sysenter_eip = msrs[i].data;
2413 break;
2414 case MSR_PAT:
2415 env->pat = msrs[i].data;
2416 break;
2417 case MSR_STAR:
2418 env->star = msrs[i].data;
2419 break;
2420 #ifdef TARGET_X86_64
2421 case MSR_CSTAR:
2422 env->cstar = msrs[i].data;
2423 break;
2424 case MSR_KERNELGSBASE:
2425 env->kernelgsbase = msrs[i].data;
2426 break;
2427 case MSR_FMASK:
2428 env->fmask = msrs[i].data;
2429 break;
2430 case MSR_LSTAR:
2431 env->lstar = msrs[i].data;
2432 break;
2433 #endif
2434 case MSR_IA32_TSC:
2435 env->tsc = msrs[i].data;
2436 break;
2437 case MSR_TSC_AUX:
2438 env->tsc_aux = msrs[i].data;
2439 break;
2440 case MSR_TSC_ADJUST:
2441 env->tsc_adjust = msrs[i].data;
2442 break;
2443 case MSR_IA32_TSCDEADLINE:
2444 env->tsc_deadline = msrs[i].data;
2445 break;
2446 case MSR_VM_HSAVE_PA:
2447 env->vm_hsave = msrs[i].data;
2448 break;
2449 case MSR_KVM_SYSTEM_TIME:
2450 env->system_time_msr = msrs[i].data;
2451 break;
2452 case MSR_KVM_WALL_CLOCK:
2453 env->wall_clock_msr = msrs[i].data;
2454 break;
2455 case MSR_MCG_STATUS:
2456 env->mcg_status = msrs[i].data;
2457 break;
2458 case MSR_MCG_CTL:
2459 env->mcg_ctl = msrs[i].data;
2460 break;
2461 case MSR_MCG_EXT_CTL:
2462 env->mcg_ext_ctl = msrs[i].data;
2463 break;
2464 case MSR_IA32_MISC_ENABLE:
2465 env->msr_ia32_misc_enable = msrs[i].data;
2466 break;
2467 case MSR_IA32_SMBASE:
2468 env->smbase = msrs[i].data;
2469 break;
2470 case MSR_SMI_COUNT:
2471 env->msr_smi_count = msrs[i].data;
2472 break;
2473 case MSR_IA32_FEATURE_CONTROL:
2474 env->msr_ia32_feature_control = msrs[i].data;
2475 break;
2476 case MSR_IA32_BNDCFGS:
2477 env->msr_bndcfgs = msrs[i].data;
2478 break;
2479 case MSR_IA32_XSS:
2480 env->xss = msrs[i].data;
2481 break;
2482 default:
2483 if (msrs[i].index >= MSR_MC0_CTL &&
2484 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
2485 env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
2487 break;
2488 case MSR_KVM_ASYNC_PF_EN:
2489 env->async_pf_en_msr = msrs[i].data;
2490 break;
2491 case MSR_KVM_PV_EOI_EN:
2492 env->pv_eoi_en_msr = msrs[i].data;
2493 break;
2494 case MSR_KVM_STEAL_TIME:
2495 env->steal_time_msr = msrs[i].data;
2496 break;
2497 case MSR_CORE_PERF_FIXED_CTR_CTRL:
2498 env->msr_fixed_ctr_ctrl = msrs[i].data;
2499 break;
2500 case MSR_CORE_PERF_GLOBAL_CTRL:
2501 env->msr_global_ctrl = msrs[i].data;
2502 break;
2503 case MSR_CORE_PERF_GLOBAL_STATUS:
2504 env->msr_global_status = msrs[i].data;
2505 break;
2506 case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
2507 env->msr_global_ovf_ctrl = msrs[i].data;
2508 break;
2509 case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
2510 env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
2511 break;
2512 case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
2513 env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
2514 break;
2515 case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
2516 env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
2517 break;
2518 case HV_X64_MSR_HYPERCALL:
2519 env->msr_hv_hypercall = msrs[i].data;
2520 break;
2521 case HV_X64_MSR_GUEST_OS_ID:
2522 env->msr_hv_guest_os_id = msrs[i].data;
2523 break;
2524 case HV_X64_MSR_APIC_ASSIST_PAGE:
2525 env->msr_hv_vapic = msrs[i].data;
2526 break;
2527 case HV_X64_MSR_REFERENCE_TSC:
2528 env->msr_hv_tsc = msrs[i].data;
2529 break;
2530 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
2531 env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
2532 break;
2533 case HV_X64_MSR_VP_RUNTIME:
2534 env->msr_hv_runtime = msrs[i].data;
2535 break;
2536 case HV_X64_MSR_SCONTROL:
2537 env->msr_hv_synic_control = msrs[i].data;
2538 break;
2539 case HV_X64_MSR_SIEFP:
2540 env->msr_hv_synic_evt_page = msrs[i].data;
2541 break;
2542 case HV_X64_MSR_SIMP:
2543 env->msr_hv_synic_msg_page = msrs[i].data;
2544 break;
2545 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
2546 env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
2547 break;
2548 case HV_X64_MSR_STIMER0_CONFIG:
2549 case HV_X64_MSR_STIMER1_CONFIG:
2550 case HV_X64_MSR_STIMER2_CONFIG:
2551 case HV_X64_MSR_STIMER3_CONFIG:
2552 env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
2553 msrs[i].data;
2554 break;
2555 case HV_X64_MSR_STIMER0_COUNT:
2556 case HV_X64_MSR_STIMER1_COUNT:
2557 case HV_X64_MSR_STIMER2_COUNT:
2558 case HV_X64_MSR_STIMER3_COUNT:
2559 env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
2560 msrs[i].data;
2561 break;
2562 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
2563 env->msr_hv_reenlightenment_control = msrs[i].data;
2564 break;
2565 case HV_X64_MSR_TSC_EMULATION_CONTROL:
2566 env->msr_hv_tsc_emulation_control = msrs[i].data;
2567 break;
2568 case HV_X64_MSR_TSC_EMULATION_STATUS:
2569 env->msr_hv_tsc_emulation_status = msrs[i].data;
2570 break;
2571 case MSR_MTRRdefType:
2572 env->mtrr_deftype = msrs[i].data;
2573 break;
2574 case MSR_MTRRfix64K_00000:
2575 env->mtrr_fixed[0] = msrs[i].data;
2576 break;
2577 case MSR_MTRRfix16K_80000:
2578 env->mtrr_fixed[1] = msrs[i].data;
2579 break;
2580 case MSR_MTRRfix16K_A0000:
2581 env->mtrr_fixed[2] = msrs[i].data;
2582 break;
2583 case MSR_MTRRfix4K_C0000:
2584 env->mtrr_fixed[3] = msrs[i].data;
2585 break;
2586 case MSR_MTRRfix4K_C8000:
2587 env->mtrr_fixed[4] = msrs[i].data;
2588 break;
2589 case MSR_MTRRfix4K_D0000:
2590 env->mtrr_fixed[5] = msrs[i].data;
2591 break;
2592 case MSR_MTRRfix4K_D8000:
2593 env->mtrr_fixed[6] = msrs[i].data;
2594 break;
2595 case MSR_MTRRfix4K_E0000:
2596 env->mtrr_fixed[7] = msrs[i].data;
2597 break;
2598 case MSR_MTRRfix4K_E8000:
2599 env->mtrr_fixed[8] = msrs[i].data;
2600 break;
2601 case MSR_MTRRfix4K_F0000:
2602 env->mtrr_fixed[9] = msrs[i].data;
2603 break;
2604 case MSR_MTRRfix4K_F8000:
2605 env->mtrr_fixed[10] = msrs[i].data;
2606 break;
2607 case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
2608 if (index & 1) {
2609 env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
2610 mtrr_top_bits;
2611 } else {
2612 env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
2614 break;
2615 case MSR_IA32_SPEC_CTRL:
2616 env->spec_ctrl = msrs[i].data;
2617 break;
2618 case MSR_VIRT_SSBD:
2619 env->virt_ssbd = msrs[i].data;
2620 break;
2621 case MSR_IA32_RTIT_CTL:
2622 env->msr_rtit_ctrl = msrs[i].data;
2623 break;
2624 case MSR_IA32_RTIT_STATUS:
2625 env->msr_rtit_status = msrs[i].data;
2626 break;
2627 case MSR_IA32_RTIT_OUTPUT_BASE:
2628 env->msr_rtit_output_base = msrs[i].data;
2629 break;
2630 case MSR_IA32_RTIT_OUTPUT_MASK:
2631 env->msr_rtit_output_mask = msrs[i].data;
2632 break;
2633 case MSR_IA32_RTIT_CR3_MATCH:
2634 env->msr_rtit_cr3_match = msrs[i].data;
2635 break;
2636 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
2637 env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
2638 break;
2642 return 0;
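/* Push QEMU's view of the vCPU run state (runnable, halted, ...) to KVM. */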
2645 static int kvm_put_mp_state(X86CPU *cpu)
2647 struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
2649 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
2652 static int kvm_get_mp_state(X86CPU *cpu)
2654 CPUState *cs = CPU(cpu);
2655 CPUX86State *env = &cpu->env;
2656 struct kvm_mp_state mp_state;
2657 int ret;
2659 ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
2660 if (ret < 0) {
2661 return ret;
2663 env->mp_state = mp_state.mp_state;
2664 if (kvm_irqchip_in_kernel()) {
2665 cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
2667 return 0;
2670 static int kvm_get_apic(X86CPU *cpu)
2672 DeviceState *apic = cpu->apic_state;
2673 struct kvm_lapic_state kapic;
2674 int ret;
2676 if (apic && kvm_irqchip_in_kernel()) {
2677 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
2678 if (ret < 0) {
2679 return ret;
2682 kvm_get_apic_state(apic, &kapic);
2684 return 0;
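/* Transfer pending exception, interrupt, NMI and SMI state to KVM; the
 * SMM fields are only marked valid on machine types that allow SMI
 * migration. */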
2687 static int kvm_put_vcpu_events(X86CPU *cpu, int level)
2689 CPUState *cs = CPU(cpu);
2690 CPUX86State *env = &cpu->env;
2691 struct kvm_vcpu_events events = {};
2693 if (!kvm_has_vcpu_events()) {
2694 return 0;
2697 events.exception.injected = (env->exception_injected >= 0);
2698 events.exception.nr = env->exception_injected;
2699 events.exception.has_error_code = env->has_error_code;
2700 events.exception.error_code = env->error_code;
2702 events.interrupt.injected = (env->interrupt_injected >= 0);
2703 events.interrupt.nr = env->interrupt_injected;
2704 events.interrupt.soft = env->soft_interrupt;
2706 events.nmi.injected = env->nmi_injected;
2707 events.nmi.pending = env->nmi_pending;
2708 events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
2710 events.sipi_vector = env->sipi_vector;
2711 events.flags = 0;
2713 if (has_msr_smbase) {
2714 events.smi.smm = !!(env->hflags & HF_SMM_MASK);
2715 events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
2716 if (kvm_irqchip_in_kernel()) {
2717 /* As soon as these are moved to the kernel, remove them
2718 * from cs->interrupt_request.
2720 events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
2721 events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
2722 cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
2723 } else {
2724 /* Keep these in cs->interrupt_request. */
2725 events.smi.pending = 0;
2726 events.smi.latched_init = 0;
2728 /* Stop SMI delivery on old machine types to avoid a reboot
2729 * on an incoming migration of an old VM.
2731 if (!cpu->kvm_no_smi_migration) {
2732 events.flags |= KVM_VCPUEVENT_VALID_SMM;
2736 if (level >= KVM_PUT_RESET_STATE) {
2737 events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
2738 if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
2739 events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
2743 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
2746 static int kvm_get_vcpu_events(X86CPU *cpu)
2748 CPUX86State *env = &cpu->env;
2749 struct kvm_vcpu_events events;
2750 int ret;
2752 if (!kvm_has_vcpu_events()) {
2753 return 0;
2756 memset(&events, 0, sizeof(events));
2757 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
2758 if (ret < 0) {
2759 return ret;
2761 env->exception_injected =
2762 events.exception.injected ? events.exception.nr : -1;
2763 env->has_error_code = events.exception.has_error_code;
2764 env->error_code = events.exception.error_code;
2766 env->interrupt_injected =
2767 events.interrupt.injected ? events.interrupt.nr : -1;
2768 env->soft_interrupt = events.interrupt.soft;
2770 env->nmi_injected = events.nmi.injected;
2771 env->nmi_pending = events.nmi.pending;
2772 if (events.nmi.masked) {
2773 env->hflags2 |= HF2_NMI_MASK;
2774 } else {
2775 env->hflags2 &= ~HF2_NMI_MASK;
2778 if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
2779 if (events.smi.smm) {
2780 env->hflags |= HF_SMM_MASK;
2781 } else {
2782 env->hflags &= ~HF_SMM_MASK;
2784 if (events.smi.pending) {
2785 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
2786 } else {
2787 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
2789 if (events.smi.smm_inside_nmi) {
2790 env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
2791 } else {
2792 env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
2794 if (events.smi.latched_init) {
2795 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
2796 } else {
2797 cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
2801 env->sipi_vector = events.sipi_vector;
2803 return 0;
2806 static int kvm_guest_debug_workarounds(X86CPU *cpu)
2808 CPUState *cs = CPU(cpu);
2809 CPUX86State *env = &cpu->env;
2810 int ret = 0;
2811 unsigned long reinject_trap = 0;
2813 if (!kvm_has_vcpu_events()) {
2814 if (env->exception_injected == 1) {
2815 reinject_trap = KVM_GUESTDBG_INJECT_DB;
2816 } else if (env->exception_injected == 3) {
2817 reinject_trap = KVM_GUESTDBG_INJECT_BP;
2819 env->exception_injected = -1;
2823 * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
2824 * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
2825 * by updating the debug state once again if single-stepping is on.
2826 * Another reason to call kvm_update_guest_debug here is a pending debug
2827 * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
2828 * reinject them via SET_GUEST_DEBUG.
2830 if (reinject_trap ||
2831 (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
2832 ret = kvm_update_guest_debug(cs, reinject_trap);
2834 return ret;
2837 static int kvm_put_debugregs(X86CPU *cpu)
2839 CPUX86State *env = &cpu->env;
2840 struct kvm_debugregs dbgregs;
2841 int i;
2843 if (!kvm_has_debugregs()) {
2844 return 0;
2847 for (i = 0; i < 4; i++) {
2848 dbgregs.db[i] = env->dr[i];
2850 dbgregs.dr6 = env->dr[6];
2851 dbgregs.dr7 = env->dr[7];
2852 dbgregs.flags = 0;
2854 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
2857 static int kvm_get_debugregs(X86CPU *cpu)
2859 CPUX86State *env = &cpu->env;
2860 struct kvm_debugregs dbgregs;
2861 int i, ret;
2863 if (!kvm_has_debugregs()) {
2864 return 0;
2867 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
2868 if (ret < 0) {
2869 return ret;
2871 for (i = 0; i < 4; i++) {
2872 env->dr[i] = dbgregs.db[i];
2874 env->dr[4] = env->dr[6] = dbgregs.dr6;
2875 env->dr[5] = env->dr[7] = dbgregs.dr7;
2877 return 0;
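/* Write the full CPU state back to KVM. The ordering below matters:
 * old-style MCE injection must precede kvm_put_msrs() and the guest
 * debug workarounds must come last. */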
2880 int kvm_arch_put_registers(CPUState *cpu, int level)
2882 X86CPU *x86_cpu = X86_CPU(cpu);
2883 int ret;
2885 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
2887 if (level >= KVM_PUT_RESET_STATE) {
2888 ret = kvm_put_msr_feature_control(x86_cpu);
2889 if (ret < 0) {
2890 return ret;
2894 if (level == KVM_PUT_FULL_STATE) {
2895 /* We don't check for kvm_arch_set_tsc_khz() errors here,
2896 * because TSC frequency mismatch shouldn't abort migration,
2897 * unless the user explicitly asked for a more strict TSC
2898 * setting (e.g. using an explicit "tsc-freq" option).
2900 kvm_arch_set_tsc_khz(cpu);
2903 ret = kvm_getput_regs(x86_cpu, 1);
2904 if (ret < 0) {
2905 return ret;
2907 ret = kvm_put_xsave(x86_cpu);
2908 if (ret < 0) {
2909 return ret;
2911 ret = kvm_put_xcrs(x86_cpu);
2912 if (ret < 0) {
2913 return ret;
2915 ret = kvm_put_sregs(x86_cpu);
2916 if (ret < 0) {
2917 return ret;
2919 /* must be before kvm_put_msrs */
2920 ret = kvm_inject_mce_oldstyle(x86_cpu);
2921 if (ret < 0) {
2922 return ret;
2924 ret = kvm_put_msrs(x86_cpu, level);
2925 if (ret < 0) {
2926 return ret;
2928 ret = kvm_put_vcpu_events(x86_cpu, level);
2929 if (ret < 0) {
2930 return ret;
2932 if (level >= KVM_PUT_RESET_STATE) {
2933 ret = kvm_put_mp_state(x86_cpu);
2934 if (ret < 0) {
2935 return ret;
2939 ret = kvm_put_tscdeadline_msr(x86_cpu);
2940 if (ret < 0) {
2941 return ret;
2943 ret = kvm_put_debugregs(x86_cpu);
2944 if (ret < 0) {
2945 return ret;
2947 /* must be last */
2948 ret = kvm_guest_debug_workarounds(x86_cpu);
2949 if (ret < 0) {
2950 return ret;
2952 return 0;
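/* Read the complete vCPU state from KVM into CPUX86State. MP state is
 * fetched early because KVM_GET_MP_STATE can modify CS and RIP. */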
2955 int kvm_arch_get_registers(CPUState *cs)
2957 X86CPU *cpu = X86_CPU(cs);
2958 int ret;
2960 assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
2962 ret = kvm_get_vcpu_events(cpu);
2963 if (ret < 0) {
2964 goto out;
2967 * KVM_GET_MP_STATE can modify CS and RIP, call it before
2968 * KVM_GET_REGS and KVM_GET_SREGS.
2970 ret = kvm_get_mp_state(cpu);
2971 if (ret < 0) {
2972 goto out;
2974 ret = kvm_getput_regs(cpu, 0);
2975 if (ret < 0) {
2976 goto out;
2978 ret = kvm_get_xsave(cpu);
2979 if (ret < 0) {
2980 goto out;
2982 ret = kvm_get_xcrs(cpu);
2983 if (ret < 0) {
2984 goto out;
2986 ret = kvm_get_sregs(cpu);
2987 if (ret < 0) {
2988 goto out;
2990 ret = kvm_get_msrs(cpu);
2991 if (ret < 0) {
2992 goto out;
2994 ret = kvm_get_apic(cpu);
2995 if (ret < 0) {
2996 goto out;
2998 ret = kvm_get_debugregs(cpu);
2999 if (ret < 0) {
3000 goto out;
3002 ret = 0;
3003 out:
3004 cpu_sync_bndcs_hflags(&cpu->env);
3005 return ret;
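/* Runs right before KVM_RUN: inject pending NMIs/SMIs and, with a
 * userspace irqchip, inject PIC interrupts or request an interrupt
 * window exit when the guest cannot accept an interrupt yet. */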
3008 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
3010 X86CPU *x86_cpu = X86_CPU(cpu);
3011 CPUX86State *env = &x86_cpu->env;
3012 int ret;
3014 /* Inject NMI */
3015 if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
3016 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
3017 qemu_mutex_lock_iothread();
3018 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
3019 qemu_mutex_unlock_iothread();
3020 DPRINTF("injected NMI\n");
3021 ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
3022 if (ret < 0) {
3023 fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
3024 strerror(-ret));
3027 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
3028 qemu_mutex_lock_iothread();
3029 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
3030 qemu_mutex_unlock_iothread();
3031 DPRINTF("injected SMI\n");
3032 ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
3033 if (ret < 0) {
3034 fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
3035 strerror(-ret));
3040 if (!kvm_pic_in_kernel()) {
3041 qemu_mutex_lock_iothread();
3044 /* Force the VCPU out of its inner loop to process any INIT requests
3045 * or (for userspace APIC, but it is cheap to combine the checks here)
3046 * pending TPR access reports.
3048 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
3049 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
3050 !(env->hflags & HF_SMM_MASK)) {
3051 cpu->exit_request = 1;
3053 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
3054 cpu->exit_request = 1;
3058 if (!kvm_pic_in_kernel()) {
3059 /* Try to inject an interrupt if the guest can accept it */
3060 if (run->ready_for_interrupt_injection &&
3061 (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
3062 (env->eflags & IF_MASK)) {
3063 int irq;
3065 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
3066 irq = cpu_get_pic_interrupt(env);
3067 if (irq >= 0) {
3068 struct kvm_interrupt intr;
3070 intr.irq = irq;
3071 DPRINTF("injected interrupt %d\n", irq);
3072 ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
3073 if (ret < 0) {
3074 fprintf(stderr,
3075 "KVM: injection failed, interrupt lost (%s)\n",
3076 strerror(-ret));
3081 /* If we have an interrupt but the guest is not ready to receive an
3082 * interrupt, request an interrupt window exit. This will
3083 * cause a return to userspace as soon as the guest is ready to
3084 * receive interrupts. */
3085 if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
3086 run->request_interrupt_window = 1;
3087 } else {
3088 run->request_interrupt_window = 0;
3091 DPRINTF("setting tpr\n");
3092 run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
3094 qemu_mutex_unlock_iothread();
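/* Runs right after KVM_RUN returns: fold the SMM and IF flags and the
 * APIC TPR/base from the shared kvm_run structure back into QEMU. */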
3098 MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
3100 X86CPU *x86_cpu = X86_CPU(cpu);
3101 CPUX86State *env = &x86_cpu->env;
3103 if (run->flags & KVM_RUN_X86_SMM) {
3104 env->hflags |= HF_SMM_MASK;
3105 } else {
3106 env->hflags &= ~HF_SMM_MASK;
3108 if (run->if_flag) {
3109 env->eflags |= IF_MASK;
3110 } else {
3111 env->eflags &= ~IF_MASK;
3114 /* We need to protect the apic state against concurrent accesses from
3115 * different threads in case the userspace irqchip is used. */
3116 if (!kvm_irqchip_in_kernel()) {
3117 qemu_mutex_lock_iothread();
3119 cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
3120 cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
3121 if (!kvm_irqchip_in_kernel()) {
3122 qemu_mutex_unlock_iothread();
3124 return cpu_get_mem_attrs(env);
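/* Handle interrupt requests that are processed outside KVM_RUN (MCE,
 * INIT, SIPI, TPR access and, for the userspace irqchip, APIC polling);
 * the return value tells the caller whether the vCPU stays halted. */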
3127 int kvm_arch_process_async_events(CPUState *cs)
3129 X86CPU *cpu = X86_CPU(cs);
3130 CPUX86State *env = &cpu->env;
3132 if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
3133 /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
3134 assert(env->mcg_cap);
3136 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
3138 kvm_cpu_synchronize_state(cs);
3140 if (env->exception_injected == EXCP08_DBLE) {
3141 /* this means triple fault */
3142 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3143 cs->exit_request = 1;
3144 return 0;
3146 env->exception_injected = EXCP12_MCHK;
3147 env->has_error_code = 0;
3149 cs->halted = 0;
3150 if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
3151 env->mp_state = KVM_MP_STATE_RUNNABLE;
3155 if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
3156 !(env->hflags & HF_SMM_MASK)) {
3157 kvm_cpu_synchronize_state(cs);
3158 do_cpu_init(cpu);
3161 if (kvm_irqchip_in_kernel()) {
3162 return 0;
3165 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
3166 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
3167 apic_poll_irq(cpu->apic_state);
3169 if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
3170 (env->eflags & IF_MASK)) ||
3171 (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
3172 cs->halted = 0;
3174 if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
3175 kvm_cpu_synchronize_state(cs);
3176 do_cpu_sipi(cpu);
3178 if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
3179 cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
3180 kvm_cpu_synchronize_state(cs);
3181 apic_handle_tpr_access_report(cpu->apic_state, env->eip,
3182 env->tpr_access_type);
3185 return cs->halted;
3188 static int kvm_handle_halt(X86CPU *cpu)
3190 CPUState *cs = CPU(cpu);
3191 CPUX86State *env = &cpu->env;
3193 if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
3194 (env->eflags & IF_MASK)) &&
3195 !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
3196 cs->halted = 1;
3197 return EXCP_HLT;
3200 return 0;
3203 static int kvm_handle_tpr_access(X86CPU *cpu)
3205 CPUState *cs = CPU(cpu);
3206 struct kvm_run *run = cs->kvm_run;
3208 apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
3209 run->tpr_access.is_write ? TPR_ACCESS_WRITE
3210 : TPR_ACCESS_READ);
3211 return 1;
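/* Software breakpoints are implemented by patching an int3 (0xcc) opcode
 * over the first instruction byte; the original byte is saved so it can
 * be restored on removal. */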
3214 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
3216 static const uint8_t int3 = 0xcc;
3218 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
3219 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
3220 return -EINVAL;
3222 return 0;
3225 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
3227 uint8_t int3;
3229 if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
3230 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
3231 return -EINVAL;
3233 return 0;
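/* Up to four hardware breakpoints/watchpoints are tracked here and later
 * programmed into DR0-DR3/DR7 by kvm_arch_update_guest_debug(). */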
3236 static struct {
3237 target_ulong addr;
3238 int len;
3239 int type;
3240 } hw_breakpoint[4];
3242 static int nb_hw_breakpoint;
3244 static int find_hw_breakpoint(target_ulong addr, int len, int type)
3246 int n;
3248 for (n = 0; n < nb_hw_breakpoint; n++) {
3249 if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
3250 (hw_breakpoint[n].len == len || len == -1)) {
3251 return n;
3254 return -1;
3257 int kvm_arch_insert_hw_breakpoint(target_ulong addr,
3258 target_ulong len, int type)
3260 switch (type) {
3261 case GDB_BREAKPOINT_HW:
3262 len = 1;
3263 break;
3264 case GDB_WATCHPOINT_WRITE:
3265 case GDB_WATCHPOINT_ACCESS:
3266 switch (len) {
3267 case 1:
3268 break;
3269 case 2:
3270 case 4:
3271 case 8:
3272 if (addr & (len - 1)) {
3273 return -EINVAL;
3275 break;
3276 default:
3277 return -EINVAL;
3279 break;
3280 default:
3281 return -ENOSYS;
3284 if (nb_hw_breakpoint == 4) {
3285 return -ENOBUFS;
3287 if (find_hw_breakpoint(addr, len, type) >= 0) {
3288 return -EEXIST;
3290 hw_breakpoint[nb_hw_breakpoint].addr = addr;
3291 hw_breakpoint[nb_hw_breakpoint].len = len;
3292 hw_breakpoint[nb_hw_breakpoint].type = type;
3293 nb_hw_breakpoint++;
3295 return 0;
3298 int kvm_arch_remove_hw_breakpoint(target_ulong addr,
3299 target_ulong len, int type)
3301 int n;
3303 n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
3304 if (n < 0) {
3305 return -ENOENT;
3307 nb_hw_breakpoint--;
3308 hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
3310 return 0;
3313 void kvm_arch_remove_all_hw_breakpoints(void)
3315 nb_hw_breakpoint = 0;
3318 static CPUWatchpoint hw_watchpoint;
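/* Decode a KVM debug exit: report it to the gdbstub when it matches one
 * of our breakpoints/watchpoints or single-stepping, otherwise re-inject
 * the exception into the guest. */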
3320 static int kvm_handle_debug(X86CPU *cpu,
3321 struct kvm_debug_exit_arch *arch_info)
3323 CPUState *cs = CPU(cpu);
3324 CPUX86State *env = &cpu->env;
3325 int ret = 0;
3326 int n;
3328 if (arch_info->exception == 1) {
3329 if (arch_info->dr6 & (1 << 14)) {
3330 if (cs->singlestep_enabled) {
3331 ret = EXCP_DEBUG;
3333 } else {
3334 for (n = 0; n < 4; n++) {
3335 if (arch_info->dr6 & (1 << n)) {
3336 switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
3337 case 0x0:
3338 ret = EXCP_DEBUG;
3339 break;
3340 case 0x1:
3341 ret = EXCP_DEBUG;
3342 cs->watchpoint_hit = &hw_watchpoint;
3343 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
3344 hw_watchpoint.flags = BP_MEM_WRITE;
3345 break;
3346 case 0x3:
3347 ret = EXCP_DEBUG;
3348 cs->watchpoint_hit = &hw_watchpoint;
3349 hw_watchpoint.vaddr = hw_breakpoint[n].addr;
3350 hw_watchpoint.flags = BP_MEM_ACCESS;
3351 break;
3356 } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
3357 ret = EXCP_DEBUG;
3359 if (ret == 0) {
3360 cpu_synchronize_state(cs);
3361 assert(env->exception_injected == -1);
3363 /* pass to guest */
3364 env->exception_injected = arch_info->exception;
3365 env->has_error_code = 0;
3368 return ret;
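/* Translate QEMU's software/hardware breakpoint lists into
 * KVM_SET_GUEST_DEBUG control flags and the DR7 type/length encoding. */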
3371 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
3373 const uint8_t type_code[] = {
3374 [GDB_BREAKPOINT_HW] = 0x0,
3375 [GDB_WATCHPOINT_WRITE] = 0x1,
3376 [GDB_WATCHPOINT_ACCESS] = 0x3
3378 const uint8_t len_code[] = {
3379 [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
3381 int n;
3383 if (kvm_sw_breakpoints_active(cpu)) {
3384 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
3386 if (nb_hw_breakpoint > 0) {
3387 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
3388 dbg->arch.debugreg[7] = 0x0600;
3389 for (n = 0; n < nb_hw_breakpoint; n++) {
3390 dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
3391 dbg->arch.debugreg[7] |= (2 << (n * 2)) |
3392 (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
3393 ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
3398 static bool host_supports_vmx(void)
3400 uint32_t ecx, unused;
3402 host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
3403 return ecx & CPUID_EXT_VMX;
3406 #define VMX_INVALID_GUEST_STATE 0x80000021
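/* Dispatch the KVM exit reasons handled at the x86 level: HLT, TPR
 * accesses, entry failures, debug exits, Hyper-V exits and IOAPIC EOIs. */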
3408 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
3410 X86CPU *cpu = X86_CPU(cs);
3411 uint64_t code;
3412 int ret;
3414 switch (run->exit_reason) {
3415 case KVM_EXIT_HLT:
3416 DPRINTF("handle_hlt\n");
3417 qemu_mutex_lock_iothread();
3418 ret = kvm_handle_halt(cpu);
3419 qemu_mutex_unlock_iothread();
3420 break;
3421 case KVM_EXIT_SET_TPR:
3422 ret = 0;
3423 break;
3424 case KVM_EXIT_TPR_ACCESS:
3425 qemu_mutex_lock_iothread();
3426 ret = kvm_handle_tpr_access(cpu);
3427 qemu_mutex_unlock_iothread();
3428 break;
3429 case KVM_EXIT_FAIL_ENTRY:
3430 code = run->fail_entry.hardware_entry_failure_reason;
3431 fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
3432 code);
3433 if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
3434 fprintf(stderr,
3435 "\nIf you're running a guest on an Intel machine without "
3436 "unrestricted mode\n"
3437 "support, the failure can be most likely due to the guest "
3438 "entering an invalid\n"
3439 "state for Intel VT. For example, the guest maybe running "
3440 "in big real mode\n"
3441 "which is not supported on less recent Intel processors."
3442 "\n\n");
3444 ret = -1;
3445 break;
3446 case KVM_EXIT_EXCEPTION:
3447 fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
3448 run->ex.exception, run->ex.error_code);
3449 ret = -1;
3450 break;
3451 case KVM_EXIT_DEBUG:
3452 DPRINTF("kvm_exit_debug\n");
3453 qemu_mutex_lock_iothread();
3454 ret = kvm_handle_debug(cpu, &run->debug.arch);
3455 qemu_mutex_unlock_iothread();
3456 break;
3457 case KVM_EXIT_HYPERV:
3458 ret = kvm_hv_handle_exit(cpu, &run->hyperv);
3459 break;
3460 case KVM_EXIT_IOAPIC_EOI:
3461 ioapic_eoi_broadcast(run->eoi.vector);
3462 ret = 0;
3463 break;
3464 default:
3465 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
3466 ret = -1;
3467 break;
3470 return ret;
3473 bool kvm_arch_stop_on_emulation_error(CPUState *cs)
3475 X86CPU *cpu = X86_CPU(cs);
3476 CPUX86State *env = &cpu->env;
3478 kvm_cpu_synchronize_state(cs);
3479 return !(env->cr[0] & CR0_PE_MASK) ||
3480 ((env->segs[R_CS].selector & 3) != 3);
3483 void kvm_arch_init_irq_routing(KVMState *s)
3485 if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
3486 /* If the kernel can't do IRQ routing, the interrupt source
3487 * override 0->2 required by HPET cannot be set up.
3488 * So we have to disable it.
3490 no_hpet = 1;
3492 /* We know at this point that we're using the in-kernel
3493 * irqchip, so we can use irqfds, and on x86 we know
3494 * we can use msi via irqfd and GSI routing.
3496 kvm_msi_via_irqfd_allowed = true;
3497 kvm_gsi_routing_allowed = true;
3499 if (kvm_irqchip_is_split()) {
3500 int i;
3502 /* If the ioapic is in QEMU and the lapics are in KVM, reserve
3503 MSI routes for signaling interrupts to the local apics. */
3504 for (i = 0; i < IOAPIC_NUM_PINS; i++) {
3505 if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
3506 error_report("Could not enable split IRQ mode.");
3507 exit(1);
3513 int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
3515 int ret;
3516 if (machine_kernel_irqchip_split(ms)) {
3517 ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
3518 if (ret) {
3519 error_report("Could not enable split irqchip mode: %s",
3520 strerror(-ret));
3521 exit(1);
3522 } else {
3523 DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
3524 kvm_split_irqchip = true;
3525 return 1;
3527 } else {
3528 return 0;
3532 /* Classic KVM device assignment interface. Will remain x86 only. */
3533 int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
3534 uint32_t flags, uint32_t *dev_id)
3536 struct kvm_assigned_pci_dev dev_data = {
3537 .segnr = dev_addr->domain,
3538 .busnr = dev_addr->bus,
3539 .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
3540 .flags = flags,
3542 int ret;
3544 dev_data.assigned_dev_id =
3545 (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
3547 ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
3548 if (ret < 0) {
3549 return ret;
3552 *dev_id = dev_data.assigned_dev_id;
3554 return 0;
3557 int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
3559 struct kvm_assigned_pci_dev dev_data = {
3560 .assigned_dev_id = dev_id,
3563 return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
3566 static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
3567 uint32_t irq_type, uint32_t guest_irq)
3569 struct kvm_assigned_irq assigned_irq = {
3570 .assigned_dev_id = dev_id,
3571 .guest_irq = guest_irq,
3572 .flags = irq_type,
3575 if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
3576 return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
3577 } else {
3578 return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
3582 int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
3583 uint32_t guest_irq)
3585 uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
3586 (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
3588 return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
3591 int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
3593 struct kvm_assigned_pci_dev dev_data = {
3594 .assigned_dev_id = dev_id,
3595 .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
3598 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
3601 static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
3602 uint32_t type)
3604 struct kvm_assigned_irq assigned_irq = {
3605 .assigned_dev_id = dev_id,
3606 .flags = type,
3609 return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
3612 int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
3614 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
3615 (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
3618 int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
3620 return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
3621 KVM_DEV_IRQ_GUEST_MSI, virq);
3624 int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
3626 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
3627 KVM_DEV_IRQ_HOST_MSI);
3630 bool kvm_device_msix_supported(KVMState *s)
3632 /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
3633 * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
3634 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
3637 int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
3638 uint32_t nr_vectors)
3640 struct kvm_assigned_msix_nr msix_nr = {
3641 .assigned_dev_id = dev_id,
3642 .entry_nr = nr_vectors,
3645 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
3648 int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
3649 int virq)
3651 struct kvm_assigned_msix_entry msix_entry = {
3652 .assigned_dev_id = dev_id,
3653 .gsi = virq,
3654 .entry = vector,
3657 return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
3660 int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
3662 return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
3663 KVM_DEV_IRQ_GUEST_MSIX, 0);
3666 int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
3668 return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
3669 KVM_DEV_IRQ_HOST_MSIX);
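/* When a vIOMMU with interrupt remapping is present, translate the MSI
 * address/data of the route through it before the route is installed in
 * KVM. */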
3672 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
3673 uint64_t address, uint32_t data, PCIDevice *dev)
3675 X86IOMMUState *iommu = x86_iommu_get_default();
3677 if (iommu) {
3678 int ret;
3679 MSIMessage src, dst;
3680 X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);
3682 if (!class->int_remap) {
3683 return 0;
3686 src.address = route->u.msi.address_hi;
3687 src.address <<= VTD_MSI_ADDR_HI_SHIFT;
3688 src.address |= route->u.msi.address_lo;
3689 src.data = route->u.msi.data;
3691 ret = class->int_remap(iommu, &src, &dst, dev ? \
3692 pci_requester_id(dev) : \
3693 X86_IOMMU_SID_INVALID);
3694 if (ret) {
3695 trace_kvm_x86_fixup_msi_error(route->gsi);
3696 return 1;
3699 route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
3700 route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
3701 route->u.msi.data = dst.data;
3704 return 0;
3707 typedef struct MSIRouteEntry MSIRouteEntry;
3709 struct MSIRouteEntry {
3710 PCIDevice *dev; /* Device pointer */
3711 int vector; /* MSI/MSIX vector index */
3712 int virq; /* Virtual IRQ index */
3713 QLIST_ENTRY(MSIRouteEntry) list;
3716 /* List of used GSI routes */
3717 static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
3718 QLIST_HEAD_INITIALIZER(msi_route_list);
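/* IEC invalidation callback: re-read the MSI message of every tracked
 * PCI device, update its KVM GSI route and commit the new routing table. */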
3720 static void kvm_update_msi_routes_all(void *private, bool global,
3721 uint32_t index, uint32_t mask)
3723 int cnt = 0;
3724 MSIRouteEntry *entry;
3725 MSIMessage msg;
3726 PCIDevice *dev;
3728 /* TODO: explicit route update */
3729 QLIST_FOREACH(entry, &msi_route_list, list) {
3730 cnt++;
3731 dev = entry->dev;
3732 if (!msix_enabled(dev) && !msi_enabled(dev)) {
3733 continue;
3735 msg = pci_get_msi_message(dev, entry->vector);
3736 kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
3738 kvm_irqchip_commit_routes(kvm_state);
3739 trace_kvm_x86_update_msi_routes(cnt);
3742 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
3743 int vector, PCIDevice *dev)
3745 static bool notify_list_inited = false;
3746 MSIRouteEntry *entry;
3748 if (!dev) {
3749 /* These are (possibly) IOAPIC routes only used for split
3750 * kernel irqchip mode, whereas we only keep track of routes
3751 * for PCI devices here. */
3752 return 0;
3755 entry = g_new0(MSIRouteEntry, 1);
3756 entry->dev = dev;
3757 entry->vector = vector;
3758 entry->virq = route->gsi;
3759 QLIST_INSERT_HEAD(&msi_route_list, entry, list);
3761 trace_kvm_x86_add_msi_route(route->gsi);
3763 if (!notify_list_inited) {
3764 /* The first time we add a route, register ourselves on the
3765 * IOMMU's IEC notify list if needed. */
3766 X86IOMMUState *iommu = x86_iommu_get_default();
3767 if (iommu) {
3768 x86_iommu_iec_register_notifier(iommu,
3769 kvm_update_msi_routes_all,
3770 NULL);
3772 notify_list_inited = true;
3774 return 0;
3777 int kvm_arch_release_virq_post(int virq)
3779 MSIRouteEntry *entry, *next;
3780 QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
3781 if (entry->virq == virq) {
3782 trace_kvm_x86_remove_msi_route(virq);
3783 QLIST_REMOVE(entry, list);
3784 g_free(entry);
3785 break;
3788 return 0;
3791 int kvm_arch_msi_data_to_gsi(uint32_t data)
3793 abort();