4 * Copyright (C) 2006-2008 Qumranet Technologies
5 * Copyright IBM, Corp. 2008
8 * Anthony Liguori <aliguori@us.ibm.com>
10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
11 * See the COPYING file in the top-level directory.
15 #include "qemu/osdep.h"
16 #include "qapi/error.h"
17 #include <sys/ioctl.h>
18 #include <sys/utsname.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_para.h>
23 #include "qemu-common.h"
25 #include "sysemu/sysemu.h"
26 #include "sysemu/hw_accel.h"
27 #include "sysemu/kvm_int.h"
31 #include "exec/gdbstub.h"
32 #include "qemu/host-utils.h"
33 #include "qemu/config-file.h"
34 #include "qemu/error-report.h"
35 #include "hw/i386/pc.h"
36 #include "hw/i386/apic.h"
37 #include "hw/i386/apic_internal.h"
38 #include "hw/i386/apic-msidef.h"
39 #include "hw/i386/intel_iommu.h"
40 #include "hw/i386/x86-iommu.h"
42 #include "exec/ioport.h"
43 #include "standard-headers/asm-x86/hyperv.h"
44 #include "hw/pci/pci.h"
45 #include "hw/pci/msi.h"
46 #include "migration/migration.h"
47 #include "exec/memattrs.h"
53 #define DPRINTF(fmt, ...) \
54 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
56 #define DPRINTF(fmt, ...) \
60 #define MSR_KVM_WALL_CLOCK 0x11
61 #define MSR_KVM_SYSTEM_TIME 0x12
63 /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
64 * 255 kvm_msr_entry structs */
65 #define MSR_BUF_SIZE 4096
68 #define BUS_MCEERR_AR 4
71 #define BUS_MCEERR_AO 5
74 const KVMCapabilityInfo kvm_arch_required_capabilities
[] = {
75 KVM_CAP_INFO(SET_TSS_ADDR
),
76 KVM_CAP_INFO(EXT_CPUID
),
77 KVM_CAP_INFO(MP_STATE
),
81 static bool has_msr_star
;
82 static bool has_msr_hsave_pa
;
83 static bool has_msr_tsc_aux
;
84 static bool has_msr_tsc_adjust
;
85 static bool has_msr_tsc_deadline
;
86 static bool has_msr_feature_control
;
87 static bool has_msr_misc_enable
;
88 static bool has_msr_smbase
;
89 static bool has_msr_bndcfgs
;
90 static int lm_capable_kernel
;
91 static bool has_msr_hv_hypercall
;
92 static bool has_msr_hv_crash
;
93 static bool has_msr_hv_reset
;
94 static bool has_msr_hv_vpindex
;
95 static bool has_msr_hv_runtime
;
96 static bool has_msr_hv_synic
;
97 static bool has_msr_hv_stimer
;
98 static bool has_msr_xss
;
100 static bool has_msr_architectural_pmu
;
101 static uint32_t num_architectural_pmu_counters
;
103 static int has_xsave
;
105 static int has_pit_state2
;
107 static bool has_msr_mcg_ext_ctl
;
109 static struct kvm_cpuid2
*cpuid_cache
;
111 int kvm_has_pit_state2(void)
113 return has_pit_state2
;
116 bool kvm_has_smm(void)
118 return kvm_check_extension(kvm_state
, KVM_CAP_X86_SMM
);
121 bool kvm_has_adjust_clock_stable(void)
123 int ret
= kvm_check_extension(kvm_state
, KVM_CAP_ADJUST_CLOCK
);
125 return (ret
== KVM_CLOCK_TSC_STABLE
);
128 bool kvm_allows_irq0_override(void)
130 return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
133 static bool kvm_x2apic_api_set_flags(uint64_t flags
)
135 KVMState
*s
= KVM_STATE(current_machine
->accelerator
);
137 return !kvm_vm_enable_cap(s
, KVM_CAP_X2APIC_API
, 0, flags
);
140 #define MEMORIZE(fn, _result) \
142 static bool _memorized; \
151 static bool has_x2apic_api
;
153 bool kvm_has_x2apic_api(void)
155 return has_x2apic_api
;
158 bool kvm_enable_x2apic(void)
161 kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS
|
162 KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK
),
166 static int kvm_get_tsc(CPUState
*cs
)
168 X86CPU
*cpu
= X86_CPU(cs
);
169 CPUX86State
*env
= &cpu
->env
;
171 struct kvm_msrs info
;
172 struct kvm_msr_entry entries
[1];
176 if (env
->tsc_valid
) {
180 msr_data
.info
.nmsrs
= 1;
181 msr_data
.entries
[0].index
= MSR_IA32_TSC
;
182 env
->tsc_valid
= !runstate_is_running();
184 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_MSRS
, &msr_data
);
190 env
->tsc
= msr_data
.entries
[0].data
;
194 static inline void do_kvm_synchronize_tsc(CPUState
*cpu
, run_on_cpu_data arg
)
199 void kvm_synchronize_all_tsc(void)
205 run_on_cpu(cpu
, do_kvm_synchronize_tsc
, RUN_ON_CPU_NULL
);
210 static struct kvm_cpuid2
*try_get_cpuid(KVMState
*s
, int max
)
212 struct kvm_cpuid2
*cpuid
;
215 size
= sizeof(*cpuid
) + max
* sizeof(*cpuid
->entries
);
216 cpuid
= g_malloc0(size
);
218 r
= kvm_ioctl(s
, KVM_GET_SUPPORTED_CPUID
, cpuid
);
219 if (r
== 0 && cpuid
->nent
>= max
) {
227 fprintf(stderr
, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
235 /* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
238 static struct kvm_cpuid2
*get_supported_cpuid(KVMState
*s
)
240 struct kvm_cpuid2
*cpuid
;
243 if (cpuid_cache
!= NULL
) {
246 while ((cpuid
= try_get_cpuid(s
, max
)) == NULL
) {
253 static const struct kvm_para_features
{
256 } para_features
[] = {
257 { KVM_CAP_CLOCKSOURCE
, KVM_FEATURE_CLOCKSOURCE
},
258 { KVM_CAP_NOP_IO_DELAY
, KVM_FEATURE_NOP_IO_DELAY
},
259 { KVM_CAP_PV_MMU
, KVM_FEATURE_MMU_OP
},
260 { KVM_CAP_ASYNC_PF
, KVM_FEATURE_ASYNC_PF
},
263 static int get_para_features(KVMState
*s
)
267 for (i
= 0; i
< ARRAY_SIZE(para_features
); i
++) {
268 if (kvm_check_extension(s
, para_features
[i
].cap
)) {
269 features
|= (1 << para_features
[i
].feature
);
277 /* Returns the value for a specific register on the cpuid entry
279 static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2
*entry
, int reg
)
299 /* Find matching entry for function/index on kvm_cpuid2 struct
301 static struct kvm_cpuid_entry2
*cpuid_find_entry(struct kvm_cpuid2
*cpuid
,
306 for (i
= 0; i
< cpuid
->nent
; ++i
) {
307 if (cpuid
->entries
[i
].function
== function
&&
308 cpuid
->entries
[i
].index
== index
) {
309 return &cpuid
->entries
[i
];
316 uint32_t kvm_arch_get_supported_cpuid(KVMState
*s
, uint32_t function
,
317 uint32_t index
, int reg
)
319 struct kvm_cpuid2
*cpuid
;
321 uint32_t cpuid_1_edx
;
324 cpuid
= get_supported_cpuid(s
);
326 struct kvm_cpuid_entry2
*entry
= cpuid_find_entry(cpuid
, function
, index
);
329 ret
= cpuid_entry_get_reg(entry
, reg
);
332 /* Fixups for the data returned by KVM, below */
334 if (function
== 1 && reg
== R_EDX
) {
335 /* KVM before 2.6.30 misreports the following features */
336 ret
|= CPUID_MTRR
| CPUID_PAT
| CPUID_MCE
| CPUID_MCA
;
337 } else if (function
== 1 && reg
== R_ECX
) {
338 /* We can set the hypervisor flag, even if KVM does not return it on
339 * GET_SUPPORTED_CPUID
341 ret
|= CPUID_EXT_HYPERVISOR
;
342 /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
343 * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
344 * and the irqchip is in the kernel.
346 if (kvm_irqchip_in_kernel() &&
347 kvm_check_extension(s
, KVM_CAP_TSC_DEADLINE_TIMER
)) {
348 ret
|= CPUID_EXT_TSC_DEADLINE_TIMER
;
351 /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
352 * without the in-kernel irqchip
354 if (!kvm_irqchip_in_kernel()) {
355 ret
&= ~CPUID_EXT_X2APIC
;
357 } else if (function
== 6 && reg
== R_EAX
) {
358 ret
|= CPUID_6_EAX_ARAT
; /* safe to allow because of emulated APIC */
359 } else if (function
== 0x80000001 && reg
== R_EDX
) {
360 /* On Intel, kvm returns cpuid according to the Intel spec,
361 * so add missing bits according to the AMD spec:
363 cpuid_1_edx
= kvm_arch_get_supported_cpuid(s
, 1, 0, R_EDX
);
364 ret
|= cpuid_1_edx
& CPUID_EXT2_AMD_ALIASES
;
365 } else if (function
== KVM_CPUID_FEATURES
&& reg
== R_EAX
) {
366 /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
367 * be enabled without the in-kernel irqchip
369 if (!kvm_irqchip_in_kernel()) {
370 ret
&= ~(1U << KVM_FEATURE_PV_UNHALT
);
374 /* fallback for older kernels */
375 if ((function
== KVM_CPUID_FEATURES
) && !found
) {
376 ret
= get_para_features(s
);
382 typedef struct HWPoisonPage
{
384 QLIST_ENTRY(HWPoisonPage
) list
;
387 static QLIST_HEAD(, HWPoisonPage
) hwpoison_page_list
=
388 QLIST_HEAD_INITIALIZER(hwpoison_page_list
);
390 static void kvm_unpoison_all(void *param
)
392 HWPoisonPage
*page
, *next_page
;
394 QLIST_FOREACH_SAFE(page
, &hwpoison_page_list
, list
, next_page
) {
395 QLIST_REMOVE(page
, list
);
396 qemu_ram_remap(page
->ram_addr
, TARGET_PAGE_SIZE
);
401 static void kvm_hwpoison_page_add(ram_addr_t ram_addr
)
405 QLIST_FOREACH(page
, &hwpoison_page_list
, list
) {
406 if (page
->ram_addr
== ram_addr
) {
410 page
= g_new(HWPoisonPage
, 1);
411 page
->ram_addr
= ram_addr
;
412 QLIST_INSERT_HEAD(&hwpoison_page_list
, page
, list
);
415 static int kvm_get_mce_cap_supported(KVMState
*s
, uint64_t *mce_cap
,
420 r
= kvm_check_extension(s
, KVM_CAP_MCE
);
423 return kvm_ioctl(s
, KVM_X86_GET_MCE_CAP_SUPPORTED
, mce_cap
);
428 static void kvm_mce_inject(X86CPU
*cpu
, hwaddr paddr
, int code
)
430 CPUState
*cs
= CPU(cpu
);
431 CPUX86State
*env
= &cpu
->env
;
432 uint64_t status
= MCI_STATUS_VAL
| MCI_STATUS_UC
| MCI_STATUS_EN
|
433 MCI_STATUS_MISCV
| MCI_STATUS_ADDRV
| MCI_STATUS_S
;
434 uint64_t mcg_status
= MCG_STATUS_MCIP
;
437 if (code
== BUS_MCEERR_AR
) {
438 status
|= MCI_STATUS_AR
| 0x134;
439 mcg_status
|= MCG_STATUS_EIPV
;
442 mcg_status
|= MCG_STATUS_RIPV
;
445 flags
= cpu_x86_support_mca_broadcast(env
) ? MCE_INJECT_BROADCAST
: 0;
446 /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the
447 * guest kernel back into env->mcg_ext_ctl.
449 cpu_synchronize_state(cs
);
450 if (env
->mcg_ext_ctl
& MCG_EXT_CTL_LMCE_EN
) {
451 mcg_status
|= MCG_STATUS_LMCE
;
455 cpu_x86_inject_mce(NULL
, cpu
, 9, status
, mcg_status
, paddr
,
456 (MCM_ADDR_PHYS
<< 6) | 0xc, flags
);
459 static void hardware_memory_error(void)
461 fprintf(stderr
, "Hardware memory error!\n");
465 int kvm_arch_on_sigbus_vcpu(CPUState
*c
, int code
, void *addr
)
467 X86CPU
*cpu
= X86_CPU(c
);
468 CPUX86State
*env
= &cpu
->env
;
472 if ((env
->mcg_cap
& MCG_SER_P
) && addr
473 && (code
== BUS_MCEERR_AR
|| code
== BUS_MCEERR_AO
)) {
474 ram_addr
= qemu_ram_addr_from_host(addr
);
475 if (ram_addr
== RAM_ADDR_INVALID
||
476 !kvm_physical_memory_addr_from_host(c
->kvm_state
, addr
, &paddr
)) {
477 fprintf(stderr
, "Hardware memory error for memory used by "
478 "QEMU itself instead of guest system!\n");
479 /* Hope we are lucky for AO MCE */
480 if (code
== BUS_MCEERR_AO
) {
483 hardware_memory_error();
486 kvm_hwpoison_page_add(ram_addr
);
487 kvm_mce_inject(cpu
, paddr
, code
);
489 if (code
== BUS_MCEERR_AO
) {
491 } else if (code
== BUS_MCEERR_AR
) {
492 hardware_memory_error();
500 int kvm_arch_on_sigbus(int code
, void *addr
)
502 X86CPU
*cpu
= X86_CPU(first_cpu
);
504 if ((cpu
->env
.mcg_cap
& MCG_SER_P
) && addr
&& code
== BUS_MCEERR_AO
) {
508 /* Hope we are lucky for AO MCE */
509 ram_addr
= qemu_ram_addr_from_host(addr
);
510 if (ram_addr
== RAM_ADDR_INVALID
||
511 !kvm_physical_memory_addr_from_host(first_cpu
->kvm_state
,
513 fprintf(stderr
, "Hardware memory error for memory used by "
514 "QEMU itself instead of guest system!: %p\n", addr
);
517 kvm_hwpoison_page_add(ram_addr
);
518 kvm_mce_inject(X86_CPU(first_cpu
), paddr
, code
);
520 if (code
== BUS_MCEERR_AO
) {
522 } else if (code
== BUS_MCEERR_AR
) {
523 hardware_memory_error();
531 static int kvm_inject_mce_oldstyle(X86CPU
*cpu
)
533 CPUX86State
*env
= &cpu
->env
;
535 if (!kvm_has_vcpu_events() && env
->exception_injected
== EXCP12_MCHK
) {
536 unsigned int bank
, bank_num
= env
->mcg_cap
& 0xff;
537 struct kvm_x86_mce mce
;
539 env
->exception_injected
= -1;
542 * There must be at least one bank in use if an MCE is pending.
543 * Find it and use its values for the event injection.
545 for (bank
= 0; bank
< bank_num
; bank
++) {
546 if (env
->mce_banks
[bank
* 4 + 1] & MCI_STATUS_VAL
) {
550 assert(bank
< bank_num
);
553 mce
.status
= env
->mce_banks
[bank
* 4 + 1];
554 mce
.mcg_status
= env
->mcg_status
;
555 mce
.addr
= env
->mce_banks
[bank
* 4 + 2];
556 mce
.misc
= env
->mce_banks
[bank
* 4 + 3];
558 return kvm_vcpu_ioctl(CPU(cpu
), KVM_X86_SET_MCE
, &mce
);
563 static void cpu_update_state(void *opaque
, int running
, RunState state
)
565 CPUX86State
*env
= opaque
;
568 env
->tsc_valid
= false;
572 unsigned long kvm_arch_vcpu_id(CPUState
*cs
)
574 X86CPU
*cpu
= X86_CPU(cs
);
578 #ifndef KVM_CPUID_SIGNATURE_NEXT
579 #define KVM_CPUID_SIGNATURE_NEXT 0x40000100
582 static bool hyperv_hypercall_available(X86CPU
*cpu
)
584 return cpu
->hyperv_vapic
||
585 (cpu
->hyperv_spinlock_attempts
!= HYPERV_SPINLOCK_NEVER_RETRY
);
588 static bool hyperv_enabled(X86CPU
*cpu
)
590 CPUState
*cs
= CPU(cpu
);
591 return kvm_check_extension(cs
->kvm_state
, KVM_CAP_HYPERV
) > 0 &&
592 (hyperv_hypercall_available(cpu
) ||
594 cpu
->hyperv_relaxed_timing
||
597 cpu
->hyperv_vpindex
||
598 cpu
->hyperv_runtime
||
603 static int kvm_arch_set_tsc_khz(CPUState
*cs
)
605 X86CPU
*cpu
= X86_CPU(cs
);
606 CPUX86State
*env
= &cpu
->env
;
613 r
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_TSC_CONTROL
) ?
614 kvm_vcpu_ioctl(cs
, KVM_SET_TSC_KHZ
, env
->tsc_khz
) :
617 /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
618 * TSC frequency doesn't match the one we want.
620 int cur_freq
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_GET_TSC_KHZ
) ?
621 kvm_vcpu_ioctl(cs
, KVM_GET_TSC_KHZ
) :
623 if (cur_freq
<= 0 || cur_freq
!= env
->tsc_khz
) {
624 error_report("warning: TSC frequency mismatch between "
625 "VM (%" PRId64
" kHz) and host (%d kHz), "
626 "and TSC scaling unavailable",
627 env
->tsc_khz
, cur_freq
);
635 static int hyperv_handle_properties(CPUState
*cs
)
637 X86CPU
*cpu
= X86_CPU(cs
);
638 CPUX86State
*env
= &cpu
->env
;
640 if (cpu
->hyperv_time
&&
641 kvm_check_extension(cs
->kvm_state
, KVM_CAP_HYPERV_TIME
) <= 0) {
642 cpu
->hyperv_time
= false;
645 if (cpu
->hyperv_relaxed_timing
) {
646 env
->features
[FEAT_HYPERV_EAX
] |= HV_X64_MSR_HYPERCALL_AVAILABLE
;
648 if (cpu
->hyperv_vapic
) {
649 env
->features
[FEAT_HYPERV_EAX
] |= HV_X64_MSR_HYPERCALL_AVAILABLE
;
650 env
->features
[FEAT_HYPERV_EAX
] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE
;
652 if (cpu
->hyperv_time
) {
653 env
->features
[FEAT_HYPERV_EAX
] |= HV_X64_MSR_HYPERCALL_AVAILABLE
;
654 env
->features
[FEAT_HYPERV_EAX
] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE
;
655 env
->features
[FEAT_HYPERV_EAX
] |= 0x200;
657 if (cpu
->hyperv_crash
&& has_msr_hv_crash
) {
658 env
->features
[FEAT_HYPERV_EDX
] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE
;
660 env
->features
[FEAT_HYPERV_EDX
] |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE
;
661 if (cpu
->hyperv_reset
&& has_msr_hv_reset
) {
662 env
->features
[FEAT_HYPERV_EAX
] |= HV_X64_MSR_RESET_AVAILABLE
;
664 if (cpu
->hyperv_vpindex
&& has_msr_hv_vpindex
) {
665 env
->features
[FEAT_HYPERV_EAX
] |= HV_X64_MSR_VP_INDEX_AVAILABLE
;
667 if (cpu
->hyperv_runtime
&& has_msr_hv_runtime
) {
668 env
->features
[FEAT_HYPERV_EAX
] |= HV_X64_MSR_VP_RUNTIME_AVAILABLE
;
670 if (cpu
->hyperv_synic
) {
673 if (!has_msr_hv_synic
||
674 kvm_vcpu_enable_cap(cs
, KVM_CAP_HYPERV_SYNIC
, 0)) {
675 fprintf(stderr
, "Hyper-V SynIC is not supported by kernel\n");
679 env
->features
[FEAT_HYPERV_EAX
] |= HV_X64_MSR_SYNIC_AVAILABLE
;
680 env
->msr_hv_synic_version
= HV_SYNIC_VERSION_1
;
681 for (sint
= 0; sint
< ARRAY_SIZE(env
->msr_hv_synic_sint
); sint
++) {
682 env
->msr_hv_synic_sint
[sint
] = HV_SYNIC_SINT_MASKED
;
685 if (cpu
->hyperv_stimer
) {
686 if (!has_msr_hv_stimer
) {
687 fprintf(stderr
, "Hyper-V timers aren't supported by kernel\n");
690 env
->features
[FEAT_HYPERV_EAX
] |= HV_X64_MSR_SYNTIMER_AVAILABLE
;
695 static Error
*invtsc_mig_blocker
;
697 #define KVM_MAX_CPUID_ENTRIES 100
699 int kvm_arch_init_vcpu(CPUState
*cs
)
702 struct kvm_cpuid2 cpuid
;
703 struct kvm_cpuid_entry2 entries
[KVM_MAX_CPUID_ENTRIES
];
704 } QEMU_PACKED cpuid_data
;
705 X86CPU
*cpu
= X86_CPU(cs
);
706 CPUX86State
*env
= &cpu
->env
;
707 uint32_t limit
, i
, j
, cpuid_i
;
709 struct kvm_cpuid_entry2
*c
;
710 uint32_t signature
[3];
711 int kvm_base
= KVM_CPUID_SIGNATURE
;
714 memset(&cpuid_data
, 0, sizeof(cpuid_data
));
718 /* Paravirtualization CPUIDs */
719 if (hyperv_enabled(cpu
)) {
720 c
= &cpuid_data
.entries
[cpuid_i
++];
721 c
->function
= HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS
;
722 if (!cpu
->hyperv_vendor_id
) {
723 memcpy(signature
, "Microsoft Hv", 12);
725 size_t len
= strlen(cpu
->hyperv_vendor_id
);
728 error_report("hv-vendor-id truncated to 12 characters");
731 memset(signature
, 0, 12);
732 memcpy(signature
, cpu
->hyperv_vendor_id
, len
);
734 c
->eax
= HYPERV_CPUID_MIN
;
735 c
->ebx
= signature
[0];
736 c
->ecx
= signature
[1];
737 c
->edx
= signature
[2];
739 c
= &cpuid_data
.entries
[cpuid_i
++];
740 c
->function
= HYPERV_CPUID_INTERFACE
;
741 memcpy(signature
, "Hv#1\0\0\0\0\0\0\0\0", 12);
742 c
->eax
= signature
[0];
747 c
= &cpuid_data
.entries
[cpuid_i
++];
748 c
->function
= HYPERV_CPUID_VERSION
;
752 c
= &cpuid_data
.entries
[cpuid_i
++];
753 c
->function
= HYPERV_CPUID_FEATURES
;
754 r
= hyperv_handle_properties(cs
);
758 c
->eax
= env
->features
[FEAT_HYPERV_EAX
];
759 c
->ebx
= env
->features
[FEAT_HYPERV_EBX
];
760 c
->edx
= env
->features
[FEAT_HYPERV_EDX
];
762 c
= &cpuid_data
.entries
[cpuid_i
++];
763 c
->function
= HYPERV_CPUID_ENLIGHTMENT_INFO
;
764 if (cpu
->hyperv_relaxed_timing
) {
765 c
->eax
|= HV_X64_RELAXED_TIMING_RECOMMENDED
;
767 if (cpu
->hyperv_vapic
) {
768 c
->eax
|= HV_X64_APIC_ACCESS_RECOMMENDED
;
770 c
->ebx
= cpu
->hyperv_spinlock_attempts
;
772 c
= &cpuid_data
.entries
[cpuid_i
++];
773 c
->function
= HYPERV_CPUID_IMPLEMENT_LIMITS
;
777 kvm_base
= KVM_CPUID_SIGNATURE_NEXT
;
778 has_msr_hv_hypercall
= true;
781 if (cpu
->expose_kvm
) {
782 memcpy(signature
, "KVMKVMKVM\0\0\0", 12);
783 c
= &cpuid_data
.entries
[cpuid_i
++];
784 c
->function
= KVM_CPUID_SIGNATURE
| kvm_base
;
785 c
->eax
= KVM_CPUID_FEATURES
| kvm_base
;
786 c
->ebx
= signature
[0];
787 c
->ecx
= signature
[1];
788 c
->edx
= signature
[2];
790 c
= &cpuid_data
.entries
[cpuid_i
++];
791 c
->function
= KVM_CPUID_FEATURES
| kvm_base
;
792 c
->eax
= env
->features
[FEAT_KVM
];
795 cpu_x86_cpuid(env
, 0, 0, &limit
, &unused
, &unused
, &unused
);
797 for (i
= 0; i
<= limit
; i
++) {
798 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
799 fprintf(stderr
, "unsupported level value: 0x%x\n", limit
);
802 c
= &cpuid_data
.entries
[cpuid_i
++];
806 /* Keep reading function 2 till all the input is received */
810 c
->flags
= KVM_CPUID_FLAG_STATEFUL_FUNC
|
811 KVM_CPUID_FLAG_STATE_READ_NEXT
;
812 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
813 times
= c
->eax
& 0xff;
815 for (j
= 1; j
< times
; ++j
) {
816 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
817 fprintf(stderr
, "cpuid_data is full, no space for "
818 "cpuid(eax:2):eax & 0xf = 0x%x\n", times
);
821 c
= &cpuid_data
.entries
[cpuid_i
++];
823 c
->flags
= KVM_CPUID_FLAG_STATEFUL_FUNC
;
824 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
832 if (i
== 0xd && j
== 64) {
836 c
->flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
838 cpu_x86_cpuid(env
, i
, j
, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
840 if (i
== 4 && c
->eax
== 0) {
843 if (i
== 0xb && !(c
->ecx
& 0xff00)) {
846 if (i
== 0xd && c
->eax
== 0) {
849 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
850 fprintf(stderr
, "cpuid_data is full, no space for "
851 "cpuid(eax:0x%x,ecx:0x%x)\n", i
, j
);
854 c
= &cpuid_data
.entries
[cpuid_i
++];
860 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
868 cpu_x86_cpuid(env
, 0x0a, 0, &ver
, &unused
, &unused
, &unused
);
869 if ((ver
& 0xff) > 0) {
870 has_msr_architectural_pmu
= true;
871 num_architectural_pmu_counters
= (ver
& 0xff00) >> 8;
873 /* Shouldn't be more than 32, since that's the number of bits
874 * available in EBX to tell us _which_ counters are available.
877 if (num_architectural_pmu_counters
> MAX_GP_COUNTERS
) {
878 num_architectural_pmu_counters
= MAX_GP_COUNTERS
;
883 cpu_x86_cpuid(env
, 0x80000000, 0, &limit
, &unused
, &unused
, &unused
);
885 for (i
= 0x80000000; i
<= limit
; i
++) {
886 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
887 fprintf(stderr
, "unsupported xlevel value: 0x%x\n", limit
);
890 c
= &cpuid_data
.entries
[cpuid_i
++];
894 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
897 /* Call Centaur's CPUID instructions they are supported. */
898 if (env
->cpuid_xlevel2
> 0) {
899 cpu_x86_cpuid(env
, 0xC0000000, 0, &limit
, &unused
, &unused
, &unused
);
901 for (i
= 0xC0000000; i
<= limit
; i
++) {
902 if (cpuid_i
== KVM_MAX_CPUID_ENTRIES
) {
903 fprintf(stderr
, "unsupported xlevel2 value: 0x%x\n", limit
);
906 c
= &cpuid_data
.entries
[cpuid_i
++];
910 cpu_x86_cpuid(env
, i
, 0, &c
->eax
, &c
->ebx
, &c
->ecx
, &c
->edx
);
914 cpuid_data
.cpuid
.nent
= cpuid_i
;
916 if (((env
->cpuid_version
>> 8)&0xF) >= 6
917 && (env
->features
[FEAT_1_EDX
] & (CPUID_MCE
| CPUID_MCA
)) ==
918 (CPUID_MCE
| CPUID_MCA
)
919 && kvm_check_extension(cs
->kvm_state
, KVM_CAP_MCE
) > 0) {
920 uint64_t mcg_cap
, unsupported_caps
;
924 ret
= kvm_get_mce_cap_supported(cs
->kvm_state
, &mcg_cap
, &banks
);
926 fprintf(stderr
, "kvm_get_mce_cap_supported: %s", strerror(-ret
));
930 if (banks
< (env
->mcg_cap
& MCG_CAP_BANKS_MASK
)) {
931 error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
932 (int)(env
->mcg_cap
& MCG_CAP_BANKS_MASK
), banks
);
936 unsupported_caps
= env
->mcg_cap
& ~(mcg_cap
| MCG_CAP_BANKS_MASK
);
937 if (unsupported_caps
) {
938 if (unsupported_caps
& MCG_LMCE_P
) {
939 error_report("kvm: LMCE not supported");
942 error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64
,
946 env
->mcg_cap
&= mcg_cap
| MCG_CAP_BANKS_MASK
;
947 ret
= kvm_vcpu_ioctl(cs
, KVM_X86_SETUP_MCE
, &env
->mcg_cap
);
949 fprintf(stderr
, "KVM_X86_SETUP_MCE: %s", strerror(-ret
));
954 qemu_add_vm_change_state_handler(cpu_update_state
, env
);
956 c
= cpuid_find_entry(&cpuid_data
.cpuid
, 1, 0);
958 has_msr_feature_control
= !!(c
->ecx
& CPUID_EXT_VMX
) ||
959 !!(c
->ecx
& CPUID_EXT_SMX
);
962 if (env
->mcg_cap
& MCG_LMCE_P
) {
963 has_msr_mcg_ext_ctl
= has_msr_feature_control
= true;
966 if (!env
->user_tsc_khz
) {
967 if ((env
->features
[FEAT_8000_0007_EDX
] & CPUID_APM_INVTSC
) &&
968 invtsc_mig_blocker
== NULL
) {
970 error_setg(&invtsc_mig_blocker
,
971 "State blocked by non-migratable CPU device"
973 migrate_add_blocker(invtsc_mig_blocker
);
975 vmstate_x86_cpu
.unmigratable
= 1;
979 cpuid_data
.cpuid
.padding
= 0;
980 r
= kvm_vcpu_ioctl(cs
, KVM_SET_CPUID2
, &cpuid_data
);
985 r
= kvm_arch_set_tsc_khz(cs
);
990 /* vcpu's TSC frequency is either specified by user, or following
991 * the value used by KVM if the former is not present. In the
992 * latter case, we query it from KVM and record in env->tsc_khz,
993 * so that vcpu's TSC frequency can be migrated later via this field.
996 r
= kvm_check_extension(cs
->kvm_state
, KVM_CAP_GET_TSC_KHZ
) ?
997 kvm_vcpu_ioctl(cs
, KVM_GET_TSC_KHZ
) :
1005 env
->kvm_xsave_buf
= qemu_memalign(4096, sizeof(struct kvm_xsave
));
1007 cpu
->kvm_msr_buf
= g_malloc0(MSR_BUF_SIZE
);
1009 if (!(env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_RDTSCP
)) {
1010 has_msr_tsc_aux
= false;
1016 void kvm_arch_reset_vcpu(X86CPU
*cpu
)
1018 CPUX86State
*env
= &cpu
->env
;
1020 env
->exception_injected
= -1;
1021 env
->interrupt_injected
= -1;
1023 if (kvm_irqchip_in_kernel()) {
1024 env
->mp_state
= cpu_is_bsp(cpu
) ? KVM_MP_STATE_RUNNABLE
:
1025 KVM_MP_STATE_UNINITIALIZED
;
1027 env
->mp_state
= KVM_MP_STATE_RUNNABLE
;
1031 void kvm_arch_do_init_vcpu(X86CPU
*cpu
)
1033 CPUX86State
*env
= &cpu
->env
;
1035 /* APs get directly into wait-for-SIPI state. */
1036 if (env
->mp_state
== KVM_MP_STATE_UNINITIALIZED
) {
1037 env
->mp_state
= KVM_MP_STATE_INIT_RECEIVED
;
1041 static int kvm_get_supported_msrs(KVMState
*s
)
1043 static int kvm_supported_msrs
;
1047 if (kvm_supported_msrs
== 0) {
1048 struct kvm_msr_list msr_list
, *kvm_msr_list
;
1050 kvm_supported_msrs
= -1;
1052 /* Obtain MSR list from KVM. These are the MSRs that we must
1055 ret
= kvm_ioctl(s
, KVM_GET_MSR_INDEX_LIST
, &msr_list
);
1056 if (ret
< 0 && ret
!= -E2BIG
) {
1059 /* Old kernel modules had a bug and could write beyond the provided
1060 memory. Allocate at least a safe amount of 1K. */
1061 kvm_msr_list
= g_malloc0(MAX(1024, sizeof(msr_list
) +
1063 sizeof(msr_list
.indices
[0])));
1065 kvm_msr_list
->nmsrs
= msr_list
.nmsrs
;
1066 ret
= kvm_ioctl(s
, KVM_GET_MSR_INDEX_LIST
, kvm_msr_list
);
1070 for (i
= 0; i
< kvm_msr_list
->nmsrs
; i
++) {
1071 if (kvm_msr_list
->indices
[i
] == MSR_STAR
) {
1072 has_msr_star
= true;
1075 if (kvm_msr_list
->indices
[i
] == MSR_VM_HSAVE_PA
) {
1076 has_msr_hsave_pa
= true;
1079 if (kvm_msr_list
->indices
[i
] == MSR_TSC_AUX
) {
1080 has_msr_tsc_aux
= true;
1083 if (kvm_msr_list
->indices
[i
] == MSR_TSC_ADJUST
) {
1084 has_msr_tsc_adjust
= true;
1087 if (kvm_msr_list
->indices
[i
] == MSR_IA32_TSCDEADLINE
) {
1088 has_msr_tsc_deadline
= true;
1091 if (kvm_msr_list
->indices
[i
] == MSR_IA32_SMBASE
) {
1092 has_msr_smbase
= true;
1095 if (kvm_msr_list
->indices
[i
] == MSR_IA32_MISC_ENABLE
) {
1096 has_msr_misc_enable
= true;
1099 if (kvm_msr_list
->indices
[i
] == MSR_IA32_BNDCFGS
) {
1100 has_msr_bndcfgs
= true;
1103 if (kvm_msr_list
->indices
[i
] == MSR_IA32_XSS
) {
1107 if (kvm_msr_list
->indices
[i
] == HV_X64_MSR_CRASH_CTL
) {
1108 has_msr_hv_crash
= true;
1111 if (kvm_msr_list
->indices
[i
] == HV_X64_MSR_RESET
) {
1112 has_msr_hv_reset
= true;
1115 if (kvm_msr_list
->indices
[i
] == HV_X64_MSR_VP_INDEX
) {
1116 has_msr_hv_vpindex
= true;
1119 if (kvm_msr_list
->indices
[i
] == HV_X64_MSR_VP_RUNTIME
) {
1120 has_msr_hv_runtime
= true;
1123 if (kvm_msr_list
->indices
[i
] == HV_X64_MSR_SCONTROL
) {
1124 has_msr_hv_synic
= true;
1127 if (kvm_msr_list
->indices
[i
] == HV_X64_MSR_STIMER0_CONFIG
) {
1128 has_msr_hv_stimer
= true;
1134 g_free(kvm_msr_list
);
1140 static Notifier smram_machine_done
;
1141 static KVMMemoryListener smram_listener
;
1142 static AddressSpace smram_address_space
;
1143 static MemoryRegion smram_as_root
;
1144 static MemoryRegion smram_as_mem
;
1146 static void register_smram_listener(Notifier
*n
, void *unused
)
1148 MemoryRegion
*smram
=
1149 (MemoryRegion
*) object_resolve_path("/machine/smram", NULL
);
1151 /* Outer container... */
1152 memory_region_init(&smram_as_root
, OBJECT(kvm_state
), "mem-container-smram", ~0ull);
1153 memory_region_set_enabled(&smram_as_root
, true);
1155 /* ... with two regions inside: normal system memory with low
1158 memory_region_init_alias(&smram_as_mem
, OBJECT(kvm_state
), "mem-smram",
1159 get_system_memory(), 0, ~0ull);
1160 memory_region_add_subregion_overlap(&smram_as_root
, 0, &smram_as_mem
, 0);
1161 memory_region_set_enabled(&smram_as_mem
, true);
1164 /* ... SMRAM with higher priority */
1165 memory_region_add_subregion_overlap(&smram_as_root
, 0, smram
, 10);
1166 memory_region_set_enabled(smram
, true);
1169 address_space_init(&smram_address_space
, &smram_as_root
, "KVM-SMRAM");
1170 kvm_memory_listener_register(kvm_state
, &smram_listener
,
1171 &smram_address_space
, 1);
1174 int kvm_arch_init(MachineState
*ms
, KVMState
*s
)
1176 uint64_t identity_base
= 0xfffbc000;
1177 uint64_t shadow_mem
;
1179 struct utsname utsname
;
1181 #ifdef KVM_CAP_XSAVE
1182 has_xsave
= kvm_check_extension(s
, KVM_CAP_XSAVE
);
1186 has_xcrs
= kvm_check_extension(s
, KVM_CAP_XCRS
);
1189 #ifdef KVM_CAP_PIT_STATE2
1190 has_pit_state2
= kvm_check_extension(s
, KVM_CAP_PIT_STATE2
);
1193 ret
= kvm_get_supported_msrs(s
);
1199 lm_capable_kernel
= strcmp(utsname
.machine
, "x86_64") == 0;
1202 * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
1203 * In order to use vm86 mode, an EPT identity map and a TSS are needed.
1204 * Since these must be part of guest physical memory, we need to allocate
1205 * them, both by setting their start addresses in the kernel and by
1206 * creating a corresponding e820 entry. We need 4 pages before the BIOS.
1208 * Older KVM versions may not support setting the identity map base. In
1209 * that case we need to stick with the default, i.e. a 256K maximum BIOS
1212 if (kvm_check_extension(s
, KVM_CAP_SET_IDENTITY_MAP_ADDR
)) {
1213 /* Allows up to 16M BIOSes. */
1214 identity_base
= 0xfeffc000;
1216 ret
= kvm_vm_ioctl(s
, KVM_SET_IDENTITY_MAP_ADDR
, &identity_base
);
1222 /* Set TSS base one page after EPT identity map. */
1223 ret
= kvm_vm_ioctl(s
, KVM_SET_TSS_ADDR
, identity_base
+ 0x1000);
1228 /* Tell fw_cfg to notify the BIOS to reserve the range. */
1229 ret
= e820_add_entry(identity_base
, 0x4000, E820_RESERVED
);
1231 fprintf(stderr
, "e820_add_entry() table is full\n");
1234 qemu_register_reset(kvm_unpoison_all
, NULL
);
1236 shadow_mem
= machine_kvm_shadow_mem(ms
);
1237 if (shadow_mem
!= -1) {
1239 ret
= kvm_vm_ioctl(s
, KVM_SET_NR_MMU_PAGES
, shadow_mem
);
1245 if (kvm_check_extension(s
, KVM_CAP_X86_SMM
)) {
1246 smram_machine_done
.notify
= register_smram_listener
;
1247 qemu_add_machine_init_done_notifier(&smram_machine_done
);
1252 static void set_v8086_seg(struct kvm_segment
*lhs
, const SegmentCache
*rhs
)
1254 lhs
->selector
= rhs
->selector
;
1255 lhs
->base
= rhs
->base
;
1256 lhs
->limit
= rhs
->limit
;
1268 static void set_seg(struct kvm_segment
*lhs
, const SegmentCache
*rhs
)
1270 unsigned flags
= rhs
->flags
;
1271 lhs
->selector
= rhs
->selector
;
1272 lhs
->base
= rhs
->base
;
1273 lhs
->limit
= rhs
->limit
;
1274 lhs
->type
= (flags
>> DESC_TYPE_SHIFT
) & 15;
1275 lhs
->present
= (flags
& DESC_P_MASK
) != 0;
1276 lhs
->dpl
= (flags
>> DESC_DPL_SHIFT
) & 3;
1277 lhs
->db
= (flags
>> DESC_B_SHIFT
) & 1;
1278 lhs
->s
= (flags
& DESC_S_MASK
) != 0;
1279 lhs
->l
= (flags
>> DESC_L_SHIFT
) & 1;
1280 lhs
->g
= (flags
& DESC_G_MASK
) != 0;
1281 lhs
->avl
= (flags
& DESC_AVL_MASK
) != 0;
1282 lhs
->unusable
= !lhs
->present
;
1286 static void get_seg(SegmentCache
*lhs
, const struct kvm_segment
*rhs
)
1288 lhs
->selector
= rhs
->selector
;
1289 lhs
->base
= rhs
->base
;
1290 lhs
->limit
= rhs
->limit
;
1291 if (rhs
->unusable
) {
1294 lhs
->flags
= (rhs
->type
<< DESC_TYPE_SHIFT
) |
1295 (rhs
->present
* DESC_P_MASK
) |
1296 (rhs
->dpl
<< DESC_DPL_SHIFT
) |
1297 (rhs
->db
<< DESC_B_SHIFT
) |
1298 (rhs
->s
* DESC_S_MASK
) |
1299 (rhs
->l
<< DESC_L_SHIFT
) |
1300 (rhs
->g
* DESC_G_MASK
) |
1301 (rhs
->avl
* DESC_AVL_MASK
);
1305 static void kvm_getput_reg(__u64
*kvm_reg
, target_ulong
*qemu_reg
, int set
)
1308 *kvm_reg
= *qemu_reg
;
1310 *qemu_reg
= *kvm_reg
;
1314 static int kvm_getput_regs(X86CPU
*cpu
, int set
)
1316 CPUX86State
*env
= &cpu
->env
;
1317 struct kvm_regs regs
;
1321 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_REGS
, ®s
);
1327 kvm_getput_reg(®s
.rax
, &env
->regs
[R_EAX
], set
);
1328 kvm_getput_reg(®s
.rbx
, &env
->regs
[R_EBX
], set
);
1329 kvm_getput_reg(®s
.rcx
, &env
->regs
[R_ECX
], set
);
1330 kvm_getput_reg(®s
.rdx
, &env
->regs
[R_EDX
], set
);
1331 kvm_getput_reg(®s
.rsi
, &env
->regs
[R_ESI
], set
);
1332 kvm_getput_reg(®s
.rdi
, &env
->regs
[R_EDI
], set
);
1333 kvm_getput_reg(®s
.rsp
, &env
->regs
[R_ESP
], set
);
1334 kvm_getput_reg(®s
.rbp
, &env
->regs
[R_EBP
], set
);
1335 #ifdef TARGET_X86_64
1336 kvm_getput_reg(®s
.r8
, &env
->regs
[8], set
);
1337 kvm_getput_reg(®s
.r9
, &env
->regs
[9], set
);
1338 kvm_getput_reg(®s
.r10
, &env
->regs
[10], set
);
1339 kvm_getput_reg(®s
.r11
, &env
->regs
[11], set
);
1340 kvm_getput_reg(®s
.r12
, &env
->regs
[12], set
);
1341 kvm_getput_reg(®s
.r13
, &env
->regs
[13], set
);
1342 kvm_getput_reg(®s
.r14
, &env
->regs
[14], set
);
1343 kvm_getput_reg(®s
.r15
, &env
->regs
[15], set
);
1346 kvm_getput_reg(®s
.rflags
, &env
->eflags
, set
);
1347 kvm_getput_reg(®s
.rip
, &env
->eip
, set
);
1350 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_REGS
, ®s
);
1356 static int kvm_put_fpu(X86CPU
*cpu
)
1358 CPUX86State
*env
= &cpu
->env
;
1362 memset(&fpu
, 0, sizeof fpu
);
1363 fpu
.fsw
= env
->fpus
& ~(7 << 11);
1364 fpu
.fsw
|= (env
->fpstt
& 7) << 11;
1365 fpu
.fcw
= env
->fpuc
;
1366 fpu
.last_opcode
= env
->fpop
;
1367 fpu
.last_ip
= env
->fpip
;
1368 fpu
.last_dp
= env
->fpdp
;
1369 for (i
= 0; i
< 8; ++i
) {
1370 fpu
.ftwx
|= (!env
->fptags
[i
]) << i
;
1372 memcpy(fpu
.fpr
, env
->fpregs
, sizeof env
->fpregs
);
1373 for (i
= 0; i
< CPU_NB_REGS
; i
++) {
1374 stq_p(&fpu
.xmm
[i
][0], env
->xmm_regs
[i
].ZMM_Q(0));
1375 stq_p(&fpu
.xmm
[i
][8], env
->xmm_regs
[i
].ZMM_Q(1));
1377 fpu
.mxcsr
= env
->mxcsr
;
1379 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_FPU
, &fpu
);
1382 #define XSAVE_FCW_FSW 0
1383 #define XSAVE_FTW_FOP 1
1384 #define XSAVE_CWD_RIP 2
1385 #define XSAVE_CWD_RDP 4
1386 #define XSAVE_MXCSR 6
1387 #define XSAVE_ST_SPACE 8
1388 #define XSAVE_XMM_SPACE 40
1389 #define XSAVE_XSTATE_BV 128
1390 #define XSAVE_YMMH_SPACE 144
1391 #define XSAVE_BNDREGS 240
1392 #define XSAVE_BNDCSR 256
1393 #define XSAVE_OPMASK 272
1394 #define XSAVE_ZMM_Hi256 288
1395 #define XSAVE_Hi16_ZMM 416
1396 #define XSAVE_PKRU 672
1398 #define XSAVE_BYTE_OFFSET(word_offset) \
1399 ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0]))
1401 #define ASSERT_OFFSET(word_offset, field) \
1402 QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
1403 offsetof(X86XSaveArea, field))
1405 ASSERT_OFFSET(XSAVE_FCW_FSW
, legacy
.fcw
);
1406 ASSERT_OFFSET(XSAVE_FTW_FOP
, legacy
.ftw
);
1407 ASSERT_OFFSET(XSAVE_CWD_RIP
, legacy
.fpip
);
1408 ASSERT_OFFSET(XSAVE_CWD_RDP
, legacy
.fpdp
);
1409 ASSERT_OFFSET(XSAVE_MXCSR
, legacy
.mxcsr
);
1410 ASSERT_OFFSET(XSAVE_ST_SPACE
, legacy
.fpregs
);
1411 ASSERT_OFFSET(XSAVE_XMM_SPACE
, legacy
.xmm_regs
);
1412 ASSERT_OFFSET(XSAVE_XSTATE_BV
, header
.xstate_bv
);
1413 ASSERT_OFFSET(XSAVE_YMMH_SPACE
, avx_state
);
1414 ASSERT_OFFSET(XSAVE_BNDREGS
, bndreg_state
);
1415 ASSERT_OFFSET(XSAVE_BNDCSR
, bndcsr_state
);
1416 ASSERT_OFFSET(XSAVE_OPMASK
, opmask_state
);
1417 ASSERT_OFFSET(XSAVE_ZMM_Hi256
, zmm_hi256_state
);
1418 ASSERT_OFFSET(XSAVE_Hi16_ZMM
, hi16_zmm_state
);
1419 ASSERT_OFFSET(XSAVE_PKRU
, pkru_state
);
1421 static int kvm_put_xsave(X86CPU
*cpu
)
1423 CPUX86State
*env
= &cpu
->env
;
1424 X86XSaveArea
*xsave
= env
->kvm_xsave_buf
;
1425 uint16_t cwd
, swd
, twd
;
1429 return kvm_put_fpu(cpu
);
1432 memset(xsave
, 0, sizeof(struct kvm_xsave
));
1434 swd
= env
->fpus
& ~(7 << 11);
1435 swd
|= (env
->fpstt
& 7) << 11;
1437 for (i
= 0; i
< 8; ++i
) {
1438 twd
|= (!env
->fptags
[i
]) << i
;
1440 xsave
->legacy
.fcw
= cwd
;
1441 xsave
->legacy
.fsw
= swd
;
1442 xsave
->legacy
.ftw
= twd
;
1443 xsave
->legacy
.fpop
= env
->fpop
;
1444 xsave
->legacy
.fpip
= env
->fpip
;
1445 xsave
->legacy
.fpdp
= env
->fpdp
;
1446 memcpy(&xsave
->legacy
.fpregs
, env
->fpregs
,
1447 sizeof env
->fpregs
);
1448 xsave
->legacy
.mxcsr
= env
->mxcsr
;
1449 xsave
->header
.xstate_bv
= env
->xstate_bv
;
1450 memcpy(&xsave
->bndreg_state
.bnd_regs
, env
->bnd_regs
,
1451 sizeof env
->bnd_regs
);
1452 xsave
->bndcsr_state
.bndcsr
= env
->bndcs_regs
;
1453 memcpy(&xsave
->opmask_state
.opmask_regs
, env
->opmask_regs
,
1454 sizeof env
->opmask_regs
);
1456 for (i
= 0; i
< CPU_NB_REGS
; i
++) {
1457 uint8_t *xmm
= xsave
->legacy
.xmm_regs
[i
];
1458 uint8_t *ymmh
= xsave
->avx_state
.ymmh
[i
];
1459 uint8_t *zmmh
= xsave
->zmm_hi256_state
.zmm_hi256
[i
];
1460 stq_p(xmm
, env
->xmm_regs
[i
].ZMM_Q(0));
1461 stq_p(xmm
+8, env
->xmm_regs
[i
].ZMM_Q(1));
1462 stq_p(ymmh
, env
->xmm_regs
[i
].ZMM_Q(2));
1463 stq_p(ymmh
+8, env
->xmm_regs
[i
].ZMM_Q(3));
1464 stq_p(zmmh
, env
->xmm_regs
[i
].ZMM_Q(4));
1465 stq_p(zmmh
+8, env
->xmm_regs
[i
].ZMM_Q(5));
1466 stq_p(zmmh
+16, env
->xmm_regs
[i
].ZMM_Q(6));
1467 stq_p(zmmh
+24, env
->xmm_regs
[i
].ZMM_Q(7));
1470 #ifdef TARGET_X86_64
1471 memcpy(&xsave
->hi16_zmm_state
.hi16_zmm
, &env
->xmm_regs
[16],
1472 16 * sizeof env
->xmm_regs
[16]);
1473 memcpy(&xsave
->pkru_state
, &env
->pkru
, sizeof env
->pkru
);
1475 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_XSAVE
, xsave
);
1478 static int kvm_put_xcrs(X86CPU
*cpu
)
1480 CPUX86State
*env
= &cpu
->env
;
1481 struct kvm_xcrs xcrs
= {};
1489 xcrs
.xcrs
[0].xcr
= 0;
1490 xcrs
.xcrs
[0].value
= env
->xcr0
;
1491 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_XCRS
, &xcrs
);
1494 static int kvm_put_sregs(X86CPU
*cpu
)
1496 CPUX86State
*env
= &cpu
->env
;
1497 struct kvm_sregs sregs
;
1499 memset(sregs
.interrupt_bitmap
, 0, sizeof(sregs
.interrupt_bitmap
));
1500 if (env
->interrupt_injected
>= 0) {
1501 sregs
.interrupt_bitmap
[env
->interrupt_injected
/ 64] |=
1502 (uint64_t)1 << (env
->interrupt_injected
% 64);
1505 if ((env
->eflags
& VM_MASK
)) {
1506 set_v8086_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
1507 set_v8086_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
1508 set_v8086_seg(&sregs
.es
, &env
->segs
[R_ES
]);
1509 set_v8086_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
1510 set_v8086_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
1511 set_v8086_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
1513 set_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
1514 set_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
1515 set_seg(&sregs
.es
, &env
->segs
[R_ES
]);
1516 set_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
1517 set_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
1518 set_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
1521 set_seg(&sregs
.tr
, &env
->tr
);
1522 set_seg(&sregs
.ldt
, &env
->ldt
);
1524 sregs
.idt
.limit
= env
->idt
.limit
;
1525 sregs
.idt
.base
= env
->idt
.base
;
1526 memset(sregs
.idt
.padding
, 0, sizeof sregs
.idt
.padding
);
1527 sregs
.gdt
.limit
= env
->gdt
.limit
;
1528 sregs
.gdt
.base
= env
->gdt
.base
;
1529 memset(sregs
.gdt
.padding
, 0, sizeof sregs
.gdt
.padding
);
1531 sregs
.cr0
= env
->cr
[0];
1532 sregs
.cr2
= env
->cr
[2];
1533 sregs
.cr3
= env
->cr
[3];
1534 sregs
.cr4
= env
->cr
[4];
1536 sregs
.cr8
= cpu_get_apic_tpr(cpu
->apic_state
);
1537 sregs
.apic_base
= cpu_get_apic_base(cpu
->apic_state
);
1539 sregs
.efer
= env
->efer
;
1541 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_SREGS
, &sregs
);
1544 static void kvm_msr_buf_reset(X86CPU
*cpu
)
1546 memset(cpu
->kvm_msr_buf
, 0, MSR_BUF_SIZE
);
1549 static void kvm_msr_entry_add(X86CPU
*cpu
, uint32_t index
, uint64_t value
)
1551 struct kvm_msrs
*msrs
= cpu
->kvm_msr_buf
;
1552 void *limit
= ((void *)msrs
) + MSR_BUF_SIZE
;
1553 struct kvm_msr_entry
*entry
= &msrs
->entries
[msrs
->nmsrs
];
1555 assert((void *)(entry
+ 1) <= limit
);
1557 entry
->index
= index
;
1558 entry
->reserved
= 0;
1559 entry
->data
= value
;
1563 static int kvm_put_one_msr(X86CPU
*cpu
, int index
, uint64_t value
)
1565 kvm_msr_buf_reset(cpu
);
1566 kvm_msr_entry_add(cpu
, index
, value
);
1568 return kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_MSRS
, cpu
->kvm_msr_buf
);
1571 void kvm_put_apicbase(X86CPU
*cpu
, uint64_t value
)
1575 ret
= kvm_put_one_msr(cpu
, MSR_IA32_APICBASE
, value
);
1579 static int kvm_put_tscdeadline_msr(X86CPU
*cpu
)
1581 CPUX86State
*env
= &cpu
->env
;
1584 if (!has_msr_tsc_deadline
) {
1588 ret
= kvm_put_one_msr(cpu
, MSR_IA32_TSCDEADLINE
, env
->tsc_deadline
);
1598 * Provide a separate write service for the feature control MSR in order to
1599 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
1600 * before writing any other state because forcibly leaving nested mode
1601 * invalidates the VCPU state.
1603 static int kvm_put_msr_feature_control(X86CPU
*cpu
)
1607 if (!has_msr_feature_control
) {
1611 ret
= kvm_put_one_msr(cpu
, MSR_IA32_FEATURE_CONTROL
,
1612 cpu
->env
.msr_ia32_feature_control
);
1621 static int kvm_put_msrs(X86CPU
*cpu
, int level
)
1623 CPUX86State
*env
= &cpu
->env
;
1627 kvm_msr_buf_reset(cpu
);
1629 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_CS
, env
->sysenter_cs
);
1630 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_ESP
, env
->sysenter_esp
);
1631 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_EIP
, env
->sysenter_eip
);
1632 kvm_msr_entry_add(cpu
, MSR_PAT
, env
->pat
);
1634 kvm_msr_entry_add(cpu
, MSR_STAR
, env
->star
);
1636 if (has_msr_hsave_pa
) {
1637 kvm_msr_entry_add(cpu
, MSR_VM_HSAVE_PA
, env
->vm_hsave
);
1639 if (has_msr_tsc_aux
) {
1640 kvm_msr_entry_add(cpu
, MSR_TSC_AUX
, env
->tsc_aux
);
1642 if (has_msr_tsc_adjust
) {
1643 kvm_msr_entry_add(cpu
, MSR_TSC_ADJUST
, env
->tsc_adjust
);
1645 if (has_msr_misc_enable
) {
1646 kvm_msr_entry_add(cpu
, MSR_IA32_MISC_ENABLE
,
1647 env
->msr_ia32_misc_enable
);
1649 if (has_msr_smbase
) {
1650 kvm_msr_entry_add(cpu
, MSR_IA32_SMBASE
, env
->smbase
);
1652 if (has_msr_bndcfgs
) {
1653 kvm_msr_entry_add(cpu
, MSR_IA32_BNDCFGS
, env
->msr_bndcfgs
);
1656 kvm_msr_entry_add(cpu
, MSR_IA32_XSS
, env
->xss
);
1658 #ifdef TARGET_X86_64
1659 if (lm_capable_kernel
) {
1660 kvm_msr_entry_add(cpu
, MSR_CSTAR
, env
->cstar
);
1661 kvm_msr_entry_add(cpu
, MSR_KERNELGSBASE
, env
->kernelgsbase
);
1662 kvm_msr_entry_add(cpu
, MSR_FMASK
, env
->fmask
);
1663 kvm_msr_entry_add(cpu
, MSR_LSTAR
, env
->lstar
);
1667 * The following MSRs have side effects on the guest or are too heavy
1668 * for normal writeback. Limit them to reset or full state updates.
1670 if (level
>= KVM_PUT_RESET_STATE
) {
1671 kvm_msr_entry_add(cpu
, MSR_IA32_TSC
, env
->tsc
);
1672 kvm_msr_entry_add(cpu
, MSR_KVM_SYSTEM_TIME
, env
->system_time_msr
);
1673 kvm_msr_entry_add(cpu
, MSR_KVM_WALL_CLOCK
, env
->wall_clock_msr
);
1674 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_ASYNC_PF
)) {
1675 kvm_msr_entry_add(cpu
, MSR_KVM_ASYNC_PF_EN
, env
->async_pf_en_msr
);
1677 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_PV_EOI
)) {
1678 kvm_msr_entry_add(cpu
, MSR_KVM_PV_EOI_EN
, env
->pv_eoi_en_msr
);
1680 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_STEAL_TIME
)) {
1681 kvm_msr_entry_add(cpu
, MSR_KVM_STEAL_TIME
, env
->steal_time_msr
);
1683 if (has_msr_architectural_pmu
) {
1684 /* Stop the counter. */
1685 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_FIXED_CTR_CTRL
, 0);
1686 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_CTRL
, 0);
1688 /* Set the counter values. */
1689 for (i
= 0; i
< MAX_FIXED_COUNTERS
; i
++) {
1690 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_FIXED_CTR0
+ i
,
1691 env
->msr_fixed_counters
[i
]);
1693 for (i
= 0; i
< num_architectural_pmu_counters
; i
++) {
1694 kvm_msr_entry_add(cpu
, MSR_P6_PERFCTR0
+ i
,
1695 env
->msr_gp_counters
[i
]);
1696 kvm_msr_entry_add(cpu
, MSR_P6_EVNTSEL0
+ i
,
1697 env
->msr_gp_evtsel
[i
]);
1699 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_STATUS
,
1700 env
->msr_global_status
);
1701 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_OVF_CTRL
,
1702 env
->msr_global_ovf_ctrl
);
1704 /* Now start the PMU. */
1705 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_FIXED_CTR_CTRL
,
1706 env
->msr_fixed_ctr_ctrl
);
1707 kvm_msr_entry_add(cpu
, MSR_CORE_PERF_GLOBAL_CTRL
,
1708 env
->msr_global_ctrl
);
1710 if (has_msr_hv_hypercall
) {
1711 kvm_msr_entry_add(cpu
, HV_X64_MSR_GUEST_OS_ID
,
1712 env
->msr_hv_guest_os_id
);
1713 kvm_msr_entry_add(cpu
, HV_X64_MSR_HYPERCALL
,
1714 env
->msr_hv_hypercall
);
1716 if (cpu
->hyperv_vapic
) {
1717 kvm_msr_entry_add(cpu
, HV_X64_MSR_APIC_ASSIST_PAGE
,
1720 if (cpu
->hyperv_time
) {
1721 kvm_msr_entry_add(cpu
, HV_X64_MSR_REFERENCE_TSC
, env
->msr_hv_tsc
);
1723 if (has_msr_hv_crash
) {
1726 for (j
= 0; j
< HV_X64_MSR_CRASH_PARAMS
; j
++)
1727 kvm_msr_entry_add(cpu
, HV_X64_MSR_CRASH_P0
+ j
,
1728 env
->msr_hv_crash_params
[j
]);
1730 kvm_msr_entry_add(cpu
, HV_X64_MSR_CRASH_CTL
,
1731 HV_X64_MSR_CRASH_CTL_NOTIFY
);
1733 if (has_msr_hv_runtime
) {
1734 kvm_msr_entry_add(cpu
, HV_X64_MSR_VP_RUNTIME
, env
->msr_hv_runtime
);
1736 if (cpu
->hyperv_synic
) {
1739 kvm_msr_entry_add(cpu
, HV_X64_MSR_SCONTROL
,
1740 env
->msr_hv_synic_control
);
1741 kvm_msr_entry_add(cpu
, HV_X64_MSR_SVERSION
,
1742 env
->msr_hv_synic_version
);
1743 kvm_msr_entry_add(cpu
, HV_X64_MSR_SIEFP
,
1744 env
->msr_hv_synic_evt_page
);
1745 kvm_msr_entry_add(cpu
, HV_X64_MSR_SIMP
,
1746 env
->msr_hv_synic_msg_page
);
1748 for (j
= 0; j
< ARRAY_SIZE(env
->msr_hv_synic_sint
); j
++) {
1749 kvm_msr_entry_add(cpu
, HV_X64_MSR_SINT0
+ j
,
1750 env
->msr_hv_synic_sint
[j
]);
1753 if (has_msr_hv_stimer
) {
1756 for (j
= 0; j
< ARRAY_SIZE(env
->msr_hv_stimer_config
); j
++) {
1757 kvm_msr_entry_add(cpu
, HV_X64_MSR_STIMER0_CONFIG
+ j
* 2,
1758 env
->msr_hv_stimer_config
[j
]);
1761 for (j
= 0; j
< ARRAY_SIZE(env
->msr_hv_stimer_count
); j
++) {
1762 kvm_msr_entry_add(cpu
, HV_X64_MSR_STIMER0_COUNT
+ j
* 2,
1763 env
->msr_hv_stimer_count
[j
]);
1766 if (env
->features
[FEAT_1_EDX
] & CPUID_MTRR
) {
1767 uint64_t phys_mask
= MAKE_64BIT_MASK(0, cpu
->phys_bits
);
1769 kvm_msr_entry_add(cpu
, MSR_MTRRdefType
, env
->mtrr_deftype
);
1770 kvm_msr_entry_add(cpu
, MSR_MTRRfix64K_00000
, env
->mtrr_fixed
[0]);
1771 kvm_msr_entry_add(cpu
, MSR_MTRRfix16K_80000
, env
->mtrr_fixed
[1]);
1772 kvm_msr_entry_add(cpu
, MSR_MTRRfix16K_A0000
, env
->mtrr_fixed
[2]);
1773 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_C0000
, env
->mtrr_fixed
[3]);
1774 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_C8000
, env
->mtrr_fixed
[4]);
1775 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_D0000
, env
->mtrr_fixed
[5]);
1776 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_D8000
, env
->mtrr_fixed
[6]);
1777 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_E0000
, env
->mtrr_fixed
[7]);
1778 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_E8000
, env
->mtrr_fixed
[8]);
1779 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_F0000
, env
->mtrr_fixed
[9]);
1780 kvm_msr_entry_add(cpu
, MSR_MTRRfix4K_F8000
, env
->mtrr_fixed
[10]);
1781 for (i
= 0; i
< MSR_MTRRcap_VCNT
; i
++) {
1782 /* The CPU GPs if we write to a bit above the physical limit of
1783 * the host CPU (and KVM emulates that)
1785 uint64_t mask
= env
->mtrr_var
[i
].mask
;
1788 kvm_msr_entry_add(cpu
, MSR_MTRRphysBase(i
),
1789 env
->mtrr_var
[i
].base
);
1790 kvm_msr_entry_add(cpu
, MSR_MTRRphysMask(i
), mask
);
1794 /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
1795 * kvm_put_msr_feature_control. */
1800 kvm_msr_entry_add(cpu
, MSR_MCG_STATUS
, env
->mcg_status
);
1801 kvm_msr_entry_add(cpu
, MSR_MCG_CTL
, env
->mcg_ctl
);
1802 if (has_msr_mcg_ext_ctl
) {
1803 kvm_msr_entry_add(cpu
, MSR_MCG_EXT_CTL
, env
->mcg_ext_ctl
);
1805 for (i
= 0; i
< (env
->mcg_cap
& 0xff) * 4; i
++) {
1806 kvm_msr_entry_add(cpu
, MSR_MC0_CTL
+ i
, env
->mce_banks
[i
]);
1810 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_MSRS
, cpu
->kvm_msr_buf
);
1815 assert(ret
== cpu
->kvm_msr_buf
->nmsrs
);
1820 static int kvm_get_fpu(X86CPU
*cpu
)
1822 CPUX86State
*env
= &cpu
->env
;
1826 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_FPU
, &fpu
);
1831 env
->fpstt
= (fpu
.fsw
>> 11) & 7;
1832 env
->fpus
= fpu
.fsw
;
1833 env
->fpuc
= fpu
.fcw
;
1834 env
->fpop
= fpu
.last_opcode
;
1835 env
->fpip
= fpu
.last_ip
;
1836 env
->fpdp
= fpu
.last_dp
;
1837 for (i
= 0; i
< 8; ++i
) {
1838 env
->fptags
[i
] = !((fpu
.ftwx
>> i
) & 1);
1840 memcpy(env
->fpregs
, fpu
.fpr
, sizeof env
->fpregs
);
1841 for (i
= 0; i
< CPU_NB_REGS
; i
++) {
1842 env
->xmm_regs
[i
].ZMM_Q(0) = ldq_p(&fpu
.xmm
[i
][0]);
1843 env
->xmm_regs
[i
].ZMM_Q(1) = ldq_p(&fpu
.xmm
[i
][8]);
1845 env
->mxcsr
= fpu
.mxcsr
;
1850 static int kvm_get_xsave(X86CPU
*cpu
)
1852 CPUX86State
*env
= &cpu
->env
;
1853 X86XSaveArea
*xsave
= env
->kvm_xsave_buf
;
1855 uint16_t cwd
, swd
, twd
;
1858 return kvm_get_fpu(cpu
);
1861 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_XSAVE
, xsave
);
1866 cwd
= xsave
->legacy
.fcw
;
1867 swd
= xsave
->legacy
.fsw
;
1868 twd
= xsave
->legacy
.ftw
;
1869 env
->fpop
= xsave
->legacy
.fpop
;
1870 env
->fpstt
= (swd
>> 11) & 7;
1873 for (i
= 0; i
< 8; ++i
) {
1874 env
->fptags
[i
] = !((twd
>> i
) & 1);
1876 env
->fpip
= xsave
->legacy
.fpip
;
1877 env
->fpdp
= xsave
->legacy
.fpdp
;
1878 env
->mxcsr
= xsave
->legacy
.mxcsr
;
1879 memcpy(env
->fpregs
, &xsave
->legacy
.fpregs
,
1880 sizeof env
->fpregs
);
1881 env
->xstate_bv
= xsave
->header
.xstate_bv
;
1882 memcpy(env
->bnd_regs
, &xsave
->bndreg_state
.bnd_regs
,
1883 sizeof env
->bnd_regs
);
1884 env
->bndcs_regs
= xsave
->bndcsr_state
.bndcsr
;
1885 memcpy(env
->opmask_regs
, &xsave
->opmask_state
.opmask_regs
,
1886 sizeof env
->opmask_regs
);
1888 for (i
= 0; i
< CPU_NB_REGS
; i
++) {
1889 uint8_t *xmm
= xsave
->legacy
.xmm_regs
[i
];
1890 uint8_t *ymmh
= xsave
->avx_state
.ymmh
[i
];
1891 uint8_t *zmmh
= xsave
->zmm_hi256_state
.zmm_hi256
[i
];
1892 env
->xmm_regs
[i
].ZMM_Q(0) = ldq_p(xmm
);
1893 env
->xmm_regs
[i
].ZMM_Q(1) = ldq_p(xmm
+8);
1894 env
->xmm_regs
[i
].ZMM_Q(2) = ldq_p(ymmh
);
1895 env
->xmm_regs
[i
].ZMM_Q(3) = ldq_p(ymmh
+8);
1896 env
->xmm_regs
[i
].ZMM_Q(4) = ldq_p(zmmh
);
1897 env
->xmm_regs
[i
].ZMM_Q(5) = ldq_p(zmmh
+8);
1898 env
->xmm_regs
[i
].ZMM_Q(6) = ldq_p(zmmh
+16);
1899 env
->xmm_regs
[i
].ZMM_Q(7) = ldq_p(zmmh
+24);
1902 #ifdef TARGET_X86_64
1903 memcpy(&env
->xmm_regs
[16], &xsave
->hi16_zmm_state
.hi16_zmm
,
1904 16 * sizeof env
->xmm_regs
[16]);
1905 memcpy(&env
->pkru
, &xsave
->pkru_state
, sizeof env
->pkru
);
1910 static int kvm_get_xcrs(X86CPU
*cpu
)
1912 CPUX86State
*env
= &cpu
->env
;
1914 struct kvm_xcrs xcrs
;
1920 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_XCRS
, &xcrs
);
1925 for (i
= 0; i
< xcrs
.nr_xcrs
; i
++) {
1926 /* Only support xcr0 now */
1927 if (xcrs
.xcrs
[i
].xcr
== 0) {
1928 env
->xcr0
= xcrs
.xcrs
[i
].value
;
1935 static int kvm_get_sregs(X86CPU
*cpu
)
1937 CPUX86State
*env
= &cpu
->env
;
1938 struct kvm_sregs sregs
;
1942 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_GET_SREGS
, &sregs
);
1947 /* There can only be one pending IRQ set in the bitmap at a time, so try
1948 to find it and save its number instead (-1 for none). */
1949 env
->interrupt_injected
= -1;
1950 for (i
= 0; i
< ARRAY_SIZE(sregs
.interrupt_bitmap
); i
++) {
1951 if (sregs
.interrupt_bitmap
[i
]) {
1952 bit
= ctz64(sregs
.interrupt_bitmap
[i
]);
1953 env
->interrupt_injected
= i
* 64 + bit
;
1958 get_seg(&env
->segs
[R_CS
], &sregs
.cs
);
1959 get_seg(&env
->segs
[R_DS
], &sregs
.ds
);
1960 get_seg(&env
->segs
[R_ES
], &sregs
.es
);
1961 get_seg(&env
->segs
[R_FS
], &sregs
.fs
);
1962 get_seg(&env
->segs
[R_GS
], &sregs
.gs
);
1963 get_seg(&env
->segs
[R_SS
], &sregs
.ss
);
1965 get_seg(&env
->tr
, &sregs
.tr
);
1966 get_seg(&env
->ldt
, &sregs
.ldt
);
1968 env
->idt
.limit
= sregs
.idt
.limit
;
1969 env
->idt
.base
= sregs
.idt
.base
;
1970 env
->gdt
.limit
= sregs
.gdt
.limit
;
1971 env
->gdt
.base
= sregs
.gdt
.base
;
1973 env
->cr
[0] = sregs
.cr0
;
1974 env
->cr
[2] = sregs
.cr2
;
1975 env
->cr
[3] = sregs
.cr3
;
1976 env
->cr
[4] = sregs
.cr4
;
1978 env
->efer
= sregs
.efer
;
1980 /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
1982 #define HFLAG_COPY_MASK \
1983 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
1984 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
1985 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
1986 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
1988 hflags
= env
->hflags
& HFLAG_COPY_MASK
;
1989 hflags
|= (env
->segs
[R_SS
].flags
>> DESC_DPL_SHIFT
) & HF_CPL_MASK
;
1990 hflags
|= (env
->cr
[0] & CR0_PE_MASK
) << (HF_PE_SHIFT
- CR0_PE_SHIFT
);
1991 hflags
|= (env
->cr
[0] << (HF_MP_SHIFT
- CR0_MP_SHIFT
)) &
1992 (HF_MP_MASK
| HF_EM_MASK
| HF_TS_MASK
);
1993 hflags
|= (env
->eflags
& (HF_TF_MASK
| HF_VM_MASK
| HF_IOPL_MASK
));
1995 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
1996 hflags
|= HF_OSFXSR_MASK
;
1999 if (env
->efer
& MSR_EFER_LMA
) {
2000 hflags
|= HF_LMA_MASK
;
2003 if ((hflags
& HF_LMA_MASK
) && (env
->segs
[R_CS
].flags
& DESC_L_MASK
)) {
2004 hflags
|= HF_CS32_MASK
| HF_SS32_MASK
| HF_CS64_MASK
;
2006 hflags
|= (env
->segs
[R_CS
].flags
& DESC_B_MASK
) >>
2007 (DESC_B_SHIFT
- HF_CS32_SHIFT
);
2008 hflags
|= (env
->segs
[R_SS
].flags
& DESC_B_MASK
) >>
2009 (DESC_B_SHIFT
- HF_SS32_SHIFT
);
2010 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
) ||
2011 !(hflags
& HF_CS32_MASK
)) {
2012 hflags
|= HF_ADDSEG_MASK
;
2014 hflags
|= ((env
->segs
[R_DS
].base
| env
->segs
[R_ES
].base
|
2015 env
->segs
[R_SS
].base
) != 0) << HF_ADDSEG_SHIFT
;
2018 env
->hflags
= hflags
;
2023 static int kvm_get_msrs(X86CPU
*cpu
)
2025 CPUX86State
*env
= &cpu
->env
;
2026 struct kvm_msr_entry
*msrs
= cpu
->kvm_msr_buf
->entries
;
2028 uint64_t mtrr_top_bits
;
2030 kvm_msr_buf_reset(cpu
);
2032 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_CS
, 0);
2033 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_ESP
, 0);
2034 kvm_msr_entry_add(cpu
, MSR_IA32_SYSENTER_EIP
, 0);
2035 kvm_msr_entry_add(cpu
, MSR_PAT
, 0);
2037 kvm_msr_entry_add(cpu
, MSR_STAR
, 0);
2039 if (has_msr_hsave_pa
) {
2040 kvm_msr_entry_add(cpu
, MSR_VM_HSAVE_PA
, 0);
2042 if (has_msr_tsc_aux
) {
2043 kvm_msr_entry_add(cpu
, MSR_TSC_AUX
, 0);
2045 if (has_msr_tsc_adjust
) {
2046 kvm_msr_entry_add(cpu
, MSR_TSC_ADJUST
, 0);
2048 if (has_msr_tsc_deadline
) {
2049 kvm_msr_entry_add(cpu
, MSR_IA32_TSCDEADLINE
, 0);
2051 if (has_msr_misc_enable
) {
2052 kvm_msr_entry_add(cpu
, MSR_IA32_MISC_ENABLE
, 0);
2054 if (has_msr_smbase
) {
2055 kvm_msr_entry_add(cpu
, MSR_IA32_SMBASE
, 0);
2057 if (has_msr_feature_control
) {
2058 kvm_msr_entry_add(cpu
, MSR_IA32_FEATURE_CONTROL
, 0);
2060 if (has_msr_bndcfgs
) {
2061 kvm_msr_entry_add(cpu
, MSR_IA32_BNDCFGS
, 0);
2064 kvm_msr_entry_add(cpu
, MSR_IA32_XSS
, 0);
2068 if (!env
->tsc_valid
) {
2069 kvm_msr_entry_add(cpu
, MSR_IA32_TSC
, 0);
2070 env
->tsc_valid
= !runstate_is_running();
2073 #ifdef TARGET_X86_64
2074 if (lm_capable_kernel
) {
2075 kvm_msr_entry_add(cpu
, MSR_CSTAR
, 0);
2076 kvm_msr_entry_add(cpu
, MSR_KERNELGSBASE
, 0);
2077 kvm_msr_entry_add(cpu
, MSR_FMASK
, 0);
2078 kvm_msr_entry_add(cpu
, MSR_LSTAR
, 0);
2081 kvm_msr_entry_add(cpu
, MSR_KVM_SYSTEM_TIME
, 0);
2082 kvm_msr_entry_add(cpu
, MSR_KVM_WALL_CLOCK
, 0);
2083 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_ASYNC_PF
)) {
2084 kvm_msr_entry_add(cpu
, MSR_KVM_ASYNC_PF_EN
, 0);
2086 if (env
->features
[FEAT_KVM
] & (1 << KVM_FEATURE_PV_EOI
)) {
        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
    }
    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
    }
    if (has_msr_architectural_pmu) {
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
        for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
        }
        for (i = 0; i < num_architectural_pmu_counters; i++) {
            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
        }
    }

    if (env->mcg_cap) {
        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
        if (has_msr_mcg_ext_ctl) {
            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
        }
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
        }
    }

    if (has_msr_hv_hypercall) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    }
    if (cpu->hyperv_vapic) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
    }
    if (cpu->hyperv_time) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
        }
    }
    if (has_msr_hv_runtime) {
        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
    }
    if (cpu->hyperv_synic) {
        uint32_t msr;

        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (has_msr_hv_stimer) {
        uint32_t msr;

        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
             msr++) {
            kvm_msr_entry_add(cpu, msr, 0);
        }
    }
    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
        }
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
    if (ret < 0) {
        return ret;
    }

    assert(ret == cpu->kvm_msr_buf->nmsrs);
    /*
     * MTRR masks: Each mask consists of 5 parts
     * a  10..0: must be zero
     * b  11:    valid
     * c  n-1..12: actual mask bits
     * d  51..n: reserved, must be zero
     * e  63..52: reserved, must be zero
     *
     * 'n' is the number of physical bits supported by the CPU and is
     * apparently always <= 52.  We know our 'n' but don't know what
     * the destination's 'n' is; it might be smaller, in which case
     * it masks (c) on loading. It might be larger, in which case
     * we fill 'd' so that d..c is consistent irrespective of the 'n'
     * we're migrating to.
     */
    uint64_t mtrr_top_bits;
    if (cpu->fill_mtrr_mask) {
        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
    } else {
        mtrr_top_bits = 0;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_MCG_EXT_CTL:
            env->mcg_ext_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SVERSION:
            env->msr_hv_synic_version = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                                msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
                                                               mtrr_top_bits;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        }
    }

    return 0;
}
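
/* Multiprocessing (MP) state: mirror QEMU's view of the vCPU run state
 * (runnable, halted, ...) into the kernel and back again. */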
static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}
static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}
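
/* Read the local APIC registers back from the in-kernel irqchip, if any. */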
static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;
    events.exception.pad = 0;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
    events.nmi.pad = 0;

    events.sipi_vector = env->sipi_vector;
    events.flags = 0;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request. */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        events.flags |= KVM_VCPUEVENT_VALID_SMM;
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
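
/* Retrieve pending exception/interrupt/NMI/SMI state from KVM and mirror
 * it into CPUX86State. */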
static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}
static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}
static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}
static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}
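
/* Top-level state sync into KVM; 'level' selects how much state is written
 * back (runtime state only, reset state, or full state). */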
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (level == KVM_PUT_FULL_STATE) {
        /* We don't check for kvm_arch_set_tsc_khz() errors here,
         * because TSC frequency mismatch shouldn't abort migration,
         * unless the user explicitly asked for a more strict TSC
         * setting (e.g. using an explicit "tsc-freq" option).
         */
        kvm_arch_set_tsc_khz(cpu);
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
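
/* Top-level state sync in the other direction: pull all vCPU state out of
 * KVM while the vCPU is stopped. */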
int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
 out:
    cpu_sync_bndcs_hflags(&cpu->env);
    return ret;
}
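
/* Runs on the vCPU thread right before KVM_RUN: injects pending NMI/SMI
 * and, for the userspace PIC, pending external interrupts, and requests an
 * interrupt-window exit when the guest cannot take an interrupt yet. */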
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI and SMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}
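
/* Handle interrupt requests that must be processed outside of KVM_RUN,
 * e.g. machine checks, INIT, SIPI and userspace-APIC polling. */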
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}
static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}
static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}
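
/* Bookkeeping for the four x86 hardware breakpoint/watchpoint slots used
 * by the gdbstub hooks below. */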
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_injected == -1);

        /* pass to guest */
        env->exception_injected = arch_info->exception;
        env->has_error_code = 0;
    }

    return ret;
}
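
/* Translate the registered software and hardware breakpoints into
 * KVM_SET_GUEST_DEBUG control flags and debug register values. */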
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure can be most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}
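
/* Called once KVM IRQ routing support has been probed: enables MSI via
 * irqfd and GSI routing, and reserves IOAPIC routes in split-irqchip mode. */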
void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
           MSI routes for signaling interrupts to the local apics. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}
int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    int ret;

    if (machine_kernel_irqchip_split(ms)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}
/* Classic KVM device assignment interface. Will remain x86 only. */
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}
int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}
static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}
int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}
int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}
static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
                                     (use_host_msi ?
                                      KVM_DEV_IRQ_HOST_MSI :
                                      KVM_DEV_IRQ_HOST_INTX));
}
int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                              KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                                KVM_DEV_IRQ_HOST_MSI);
}
bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}
int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}
int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}
int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                              KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                                KVM_DEV_IRQ_HOST_MSIX);
}
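
/* With a vIOMMU that performs interrupt remapping, MSI routes programmed
 * into KVM must first be translated through the IOMMU's int_remap hook. */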
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    X86IOMMUState *iommu = x86_iommu_get_default();

    if (iommu) {
        int ret;
        MSIMessage src, dst;
        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);

        src.address = route->u.msi.address_hi;
        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
        src.address |= route->u.msi.address_lo;
        src.data = route->u.msi.data;

        ret = class->int_remap(iommu, &src, &dst, dev ? \
                               pci_requester_id(dev) : \
                               X86_IOMMU_SID_INVALID);
        if (ret) {
            trace_kvm_x86_fixup_msi_error(route->gsi);
            return 1;
        }

        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
        route->u.msi.data = dst.data;
    }

    return 0;
}
typedef struct MSIRouteEntry MSIRouteEntry;

struct MSIRouteEntry {
    PCIDevice *dev;             /* Device pointer */
    int vector;                 /* MSI/MSIX vector index */
    int virq;                   /* Virtual IRQ index */
    QLIST_ENTRY(MSIRouteEntry) list;
};

/* List of used GSI routes */
static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
    QLIST_HEAD_INITIALIZER(msi_route_list);
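
/* IEC (interrupt entry cache) invalidation callback: re-read every tracked
 * device's MSI message and refresh the corresponding KVM route. */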
static void kvm_update_msi_routes_all(void *private, bool global,
                                      uint32_t index, uint32_t mask)
{
    int cnt = 0;
    MSIRouteEntry *entry;
    MSIMessage msg;

    /* TODO: explicit route update */
    QLIST_FOREACH(entry, &msi_route_list, list) {
        cnt++;
        msg = pci_get_msi_message(entry->dev, entry->vector);
        kvm_irqchip_update_msi_route(kvm_state, entry->virq,
                                     msg, entry->dev);
    }
    kvm_irqchip_commit_routes(kvm_state);
    trace_kvm_x86_update_msi_routes(cnt);
}
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    static bool notify_list_inited = false;
    MSIRouteEntry *entry;

    if (!dev) {
        /* These are (possibly) IOAPIC routes only used for split
         * kernel irqchip mode, while what we are housekeeping are
         * PCI devices only. */
        return 0;
    }

    entry = g_new0(MSIRouteEntry, 1);
    entry->dev = dev;
    entry->vector = vector;
    entry->virq = route->gsi;
    QLIST_INSERT_HEAD(&msi_route_list, entry, list);

    trace_kvm_x86_add_msi_route(route->gsi);

    if (!notify_list_inited) {
        /* For the first time we do add route, add ourselves into
         * IOMMU's IEC notify list if needed. */
        X86IOMMUState *iommu = x86_iommu_get_default();
        if (iommu) {
            x86_iommu_iec_register_notifier(iommu,
                                            kvm_update_msi_routes_all,
                                            NULL);
        }
        notify_list_inited = true;
    }
    return 0;
}
int kvm_arch_release_virq_post(int virq)
{
    MSIRouteEntry *entry, *next;

    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
        if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
            QLIST_REMOVE(entry, list);
            break;
        }
    }
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
)