/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "cpu.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/config-file.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"
#include "exec/ioport.h"
#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
#include "migration/migration.h"
#include "exec/memattrs.h"
//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};
static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_async_pf_en;
static bool has_msr_pv_eoi_en;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static bool has_msr_kvm_steal_time;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool has_msr_hv_runtime;
static bool has_msr_mtrr;
static bool has_msr_xss;

static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;
int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}

bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}
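
/*
 * KVM_GET_SUPPORTED_CPUID requires a caller-allocated buffer and fails
 * with E2BIG when it is too small.  try_get_cpuid() makes a single attempt
 * with room for 'max' entries; the caller grows the buffer and retries.
 */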
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;

    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    return cpuid;
}
static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};
static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}
/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}
/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}
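
/*
 * Report the CPUID feature bits the kernel can virtualize, with QEMU-side
 * fixups applied below: bits that old kernels misreport are forced on, and
 * bits that only work with the in-kernel irqchip (x2apic, tsc-deadline)
 * are adjusted to match the actual configuration.
 */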
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    }

    g_free(cpuid);

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);
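
/*
 * Pages that hardware has poisoned while assigned to the guest are tracked
 * on hwpoison_page_list so that a system reset can remap them with fresh,
 * usable memory.
 */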
static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}
static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}
static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }
    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc,
                       cpu_x86_support_mca_broadcast(env) ?
                       MCE_INJECT_BROADCAST : 0);
}
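
/*
 * SIGBUS handling policy for the two handlers below: an Action Optional
 * (BUS_MCEERR_AO) error that cannot be forwarded to the guest is silently
 * ignored, while an Action Required (BUS_MCEERR_AR) error that hits memory
 * used by QEMU itself is fatal.
 */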
static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}
int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
            !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(cpu, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}
int kvm_arch_on_sigbus(int code, void *addr)
{
    X86CPU *cpu = X86_CPU(first_cpu);

    if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        hwaddr paddr;

        /* Hope we are lucky for AO MCE */
        if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
            !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
                                                addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}
static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}
static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}
unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}
#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif
static bool hyperv_hypercall_available(X86CPU *cpu)
{
    return cpu->hyperv_vapic ||
           (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
}
static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
           (hyperv_hypercall_available(cpu) ||
            cpu->hyperv_time ||
            cpu->hyperv_relaxed_timing ||
            cpu->hyperv_crash ||
            cpu->hyperv_reset ||
            cpu->hyperv_vpindex ||
            cpu->hyperv_runtime);
}
static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100
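
/*
 * The CPUID table handed to KVM_SET_CPUID2 is assembled in stages: optional
 * Hyper-V leaves at 0x40000000 (which push the KVM paravirt leaves up to
 * KVM_CPUID_SIGNATURE_NEXT), the KVM leaves themselves, and finally the
 * basic, extended and Centaur leaves taken from the CPU model via
 * cpu_x86_cpuid().
 */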
int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } QEMU_PACKED cpuid_data;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int r;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    if (hyperv_enabled(cpu)) {
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
        memcpy(signature, "Microsoft Hv", 12);
        c->eax = HYPERV_CPUID_MIN;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_INTERFACE;
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_FEATURES;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        }
        if (cpu->hyperv_vapic) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
            has_msr_hv_vapic = true;
        }
        if (cpu->hyperv_time &&
            kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
            c->eax |= 0x200;
            has_msr_hv_tsc = true;
        }
        if (cpu->hyperv_crash && has_msr_hv_crash) {
            c->edx |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
        }
        if (cpu->hyperv_reset && has_msr_hv_reset) {
            c->eax |= HV_X64_MSR_RESET_AVAILABLE;
        }
        if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
            c->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
        }
        if (cpu->hyperv_runtime && has_msr_hv_runtime) {
            c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
        }
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
        }
        if (has_msr_hv_vapic) {
            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
        }
        c->ebx = cpu->hyperv_spinlock_attempts;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
        c->eax = 0x40;
        c->ebx = 0x40;

        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];

        has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);

        has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);

        has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t ver;

        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
        if ((ver & 0xff) > 0) {
            has_msr_architectural_pmu = true;
            num_architectural_pmu_counters = (ver & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_counters = MAX_GP_COUNTERS;
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks > MCE_BANKS_DEF) {
            banks = MCE_BANKS_DEF;
        }
        mcg_cap &= MCE_CAP_DEF;
        mcg_cap |= banks;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }

        env->mcg_cap = mcg_cap;
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    c = cpuid_find_entry(&cpuid_data.cpuid, 0x80000007, 0);
    if (c && (c->edx & 1<<8) && invtsc_mig_blocker == NULL) {
        /* for migration */
        error_setg(&invtsc_mig_blocker,
                   "State blocked by non-migratable CPU device"
                   " (invtsc flag)");
        migrate_add_blocker(invtsc_mig_blocker);
        /* for savevm */
        vmstate_x86_cpu.unmigratable = 1;
    }

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        return r;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL);
    if (r && env->tsc_khz) {
        r = kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz);
        if (r < 0) {
            fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
            return r;
        }
    }

    if (has_xsave) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }

    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        has_msr_mtrr = true;
    }

    return 0;
}
*cpu
)
791 CPUX86State
*env
= &cpu
->env
;
793 env
->exception_injected
= -1;
794 env
->interrupt_injected
= -1;
796 if (kvm_irqchip_in_kernel()) {
797 env
->mp_state
= cpu_is_bsp(cpu
) ? KVM_MP_STATE_RUNNABLE
:
798 KVM_MP_STATE_UNINITIALIZED
;
800 env
->mp_state
= KVM_MP_STATE_RUNNABLE
;
void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state.  */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}
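
/*
 * Probe the kernel's MSR index list once and cache which optional MSRs are
 * available in the has_msr_* flags; the MSR put/get paths further below
 * only touch MSRs that the kernel actually reported.
 */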
static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
                    has_msr_tsc_aux = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
                    has_msr_tsc_adjust = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                    has_msr_tsc_deadline = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
                    has_msr_smbase = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                    has_msr_misc_enable = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
                    has_msr_bndcfgs = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
                    has_msr_xss = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
                    has_msr_hv_crash = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
                    has_msr_hv_reset = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
                    has_msr_hv_vpindex = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
                    has_msr_hv_runtime = true;
                    continue;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}
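
/*
 * SMM support: a second KVM memory listener tracks an address space in
 * which SMRAM overlays normal system memory at a higher priority,
 * mirroring what the chipset makes visible while the CPU is in SMM.
 */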
static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;

static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container... */
    memory_region_init(&smram_as_root, OBJECT(kvm_state),
                       "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /* ... with two regions inside: normal system memory with low
     * priority, and...
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    if (smram) {
        /* ... SMRAM with higher priority */
        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
        memory_region_set_enabled(smram, true);
    }

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1);
}
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);

    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);

#ifdef KVM_CAP_PIT_STATE2
    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    shadow_mem = machine_kvm_shadow_mem(ms);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }

    if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
        smram_machine_done.notify = register_smram_listener;
        qemu_add_machine_init_done_notifier(&smram_machine_done);
    }

    return 0;
}
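
/*
 * Helpers translating between QEMU's SegmentCache (selector/base/limit plus
 * a packed x86 descriptor flags word) and the unpacked bit-fields of KVM's
 * struct kvm_segment.
 */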
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
    lhs->padding = 0;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                 (rhs->present * DESC_P_MASK) |
                 (rhs->dpl << DESC_DPL_SHIFT) |
                 (rhs->db << DESC_B_SHIFT) |
                 (rhs->s * DESC_S_MASK) |
                 (rhs->l << DESC_L_SHIFT) |
                 (rhs->g * DESC_G_MASK) |
                 (rhs->avl * DESC_AVL_MASK);
}
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}
static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}
static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].XMM_Q(0));
        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].XMM_Q(1));
    }
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}
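
/*
 * Offsets, in 32-bit words, into struct kvm_xsave.region[], following the
 * hardware XSAVE layout: the legacy FXSAVE area first, then the XSTATE_BV
 * header word and the AVX/MPX/AVX-512 state components.
 */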
#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#define XSAVE_BNDREGS     240
#define XSAVE_BNDCSR      256
#define XSAVE_OPMASK      272
#define XSAVE_ZMM_Hi256   288
#define XSAVE_Hi16_ZMM    416
static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xsave* xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    uint8_t *xmm, *ymmh, *zmmh;
    int i, r;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
    xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
    memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
    memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_BNDREGS], env->bnd_regs,
            sizeof env->bnd_regs);
    memcpy(&xsave->region[XSAVE_BNDCSR], &env->bndcs_regs,
            sizeof(env->bndcs_regs));
    memcpy(&xsave->region[XSAVE_OPMASK], env->opmask_regs,
            sizeof env->opmask_regs);

    xmm = (uint8_t *)&xsave->region[XSAVE_XMM_SPACE];
    ymmh = (uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
    zmmh = (uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
    for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
        stq_p(xmm,     env->xmm_regs[i].XMM_Q(0));
        stq_p(xmm+8,   env->xmm_regs[i].XMM_Q(1));
        stq_p(ymmh,    env->xmm_regs[i].XMM_Q(2));
        stq_p(ymmh+8,  env->xmm_regs[i].XMM_Q(3));
        stq_p(zmmh,    env->xmm_regs[i].XMM_Q(4));
        stq_p(zmmh+8,  env->xmm_regs[i].XMM_Q(5));
        stq_p(zmmh+16, env->xmm_regs[i].XMM_Q(6));
        stq_p(zmmh+24, env->xmm_regs[i].XMM_Q(7));
    }

#ifdef TARGET_X86_64
    memcpy(&xsave->region[XSAVE_Hi16_ZMM], &env->xmm_regs[16],
            16 * sizeof env->xmm_regs[16]);
#endif
    r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
    return r;
}
static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs = {};

    if (!has_xcrs) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}
static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
}
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = 1,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}
/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entry;
    } msr_data;

    kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
                      cpu->env.msr_ia32_feature_control);

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = 1,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}
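
/*
 * Writeback of the full MSR set.  KVM_SET_MSRS takes a struct kvm_msrs
 * header immediately followed by the entry array, hence the anonymous
 * wrapper struct keeping both in one contiguous buffer.  'level' limits
 * how much is written: MSRs with guest-visible side effects (TSC,
 * kvm-clock, PMU, Hyper-V, MTRRs) are only transferred on reset or full
 * state updates.
 */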
static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[150];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0, i;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_set(&msrs[n++], MSR_TSC_AUX, env->tsc_aux);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_SMBASE, env->smbase);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
    if (has_msr_xss) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_XSS, env->xss);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback. Limit them to reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
        if (has_msr_pv_eoi_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_PV_EOI_EN,
                              env->pv_eoi_en_msr);
        }
        if (has_msr_kvm_steal_time) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_STEAL_TIME,
                              env->steal_time_msr);
        }
        if (has_msr_architectural_pmu) {
            /* Stop the counter.  */
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL, 0);

            /* Set the counter values.  */
            for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
                kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_counters; i++) {
                kvm_msr_entry_set(&msrs[n++], MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_set(&msrs[n++], MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_STATUS,
                              env->msr_global_status);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                              env->msr_global_ovf_ctrl);

            /* Now start the PMU.  */
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL,
                              env->msr_fixed_ctr_ctrl);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
                              env->msr_global_ctrl);
        }
        if (has_msr_hv_hypercall) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID,
                              env->msr_hv_guest_os_id);
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL,
                              env->msr_hv_hypercall);
        }
        if (has_msr_hv_vapic) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }
        if (has_msr_hv_tsc) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
                              env->msr_hv_tsc);
        }
        if (has_msr_hv_crash) {
            int j;

            for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);

            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_CTL,
                              HV_X64_MSR_CRASH_CTL_NOTIFY);
        }
        if (has_msr_hv_runtime) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
                              env->msr_hv_runtime);
        }
        if (has_msr_mtrr) {
            kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                kvm_msr_entry_set(&msrs[n++],
                                  MSR_MTRRphysBase(i), env->mtrr_var[i].base);
                kvm_msr_entry_set(&msrs[n++],
                                  MSR_MTRRphysMask(i), env->mtrr_var[i].mask);
            }
        }

        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
         *       kvm_put_msr_feature_control. */
    }
    if (env->mcg_cap) {
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = n,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}
static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].XMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].XMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}
static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xsave* xsave = env->kvm_xsave_buf;
    int ret, i;
    const uint8_t *xmm, *ymmh, *zmmh;
    uint16_t cwd, swd, twd;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }

    cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
    swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
    twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
    env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
    memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->bnd_regs, &xsave->region[XSAVE_BNDREGS],
            sizeof env->bnd_regs);
    memcpy(&env->bndcs_regs, &xsave->region[XSAVE_BNDCSR],
            sizeof(env->bndcs_regs));
    memcpy(env->opmask_regs, &xsave->region[XSAVE_OPMASK],
            sizeof env->opmask_regs);

    xmm = (const uint8_t *)&xsave->region[XSAVE_XMM_SPACE];
    ymmh = (const uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
    zmmh = (const uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
    for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
        env->xmm_regs[i].XMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].XMM_Q(1) = ldq_p(xmm+8);
        env->xmm_regs[i].XMM_Q(2) = ldq_p(ymmh);
        env->xmm_regs[i].XMM_Q(3) = ldq_p(ymmh+8);
        env->xmm_regs[i].XMM_Q(4) = ldq_p(zmmh);
        env->xmm_regs[i].XMM_Q(5) = ldq_p(zmmh+8);
        env->xmm_regs[i].XMM_Q(6) = ldq_p(zmmh+16);
        env->xmm_regs[i].XMM_Q(7) = ldq_p(zmmh+24);
    }

#ifdef TARGET_X86_64
    memcpy(&env->xmm_regs[16], &xsave->region[XSAVE_Hi16_ZMM],
           16 * sizeof env->xmm_regs[16]);
#endif

    return 0;
}
static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}
static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}
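
/*
 * Mirror of kvm_put_msrs: build the list of MSR indices to read, issue a
 * single KVM_GET_MSRS ioctl, then scatter the results back into the CPU
 * state.  KVM returns the number of entries it actually filled in, so the
 * read-back loop iterates up to 'ret', not 'n'.
 */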
static int kvm_get_msrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[150];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    msrs[n++].index = MSR_PAT;
    if (has_msr_star) {
        msrs[n++].index = MSR_STAR;
    }
    if (has_msr_hsave_pa) {
        msrs[n++].index = MSR_VM_HSAVE_PA;
    }
    if (has_msr_tsc_aux) {
        msrs[n++].index = MSR_TSC_AUX;
    }
    if (has_msr_tsc_adjust) {
        msrs[n++].index = MSR_TSC_ADJUST;
    }
    if (has_msr_tsc_deadline) {
        msrs[n++].index = MSR_IA32_TSCDEADLINE;
    }
    if (has_msr_misc_enable) {
        msrs[n++].index = MSR_IA32_MISC_ENABLE;
    }
    if (has_msr_smbase) {
        msrs[n++].index = MSR_IA32_SMBASE;
    }
    if (has_msr_feature_control) {
        msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
    }
    if (has_msr_bndcfgs) {
        msrs[n++].index = MSR_IA32_BNDCFGS;
    }
    if (has_msr_xss) {
        msrs[n++].index = MSR_IA32_XSS;
    }

    if (!env->tsc_valid) {
        msrs[n++].index = MSR_IA32_TSC;
        env->tsc_valid = !runstate_is_running();
    }

#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;
    if (has_msr_async_pf_en) {
        msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
    }
    if (has_msr_pv_eoi_en) {
        msrs[n++].index = MSR_KVM_PV_EOI_EN;
    }
    if (has_msr_kvm_steal_time) {
        msrs[n++].index = MSR_KVM_STEAL_TIME;
    }
    if (has_msr_architectural_pmu) {
        msrs[n++].index = MSR_CORE_PERF_FIXED_CTR_CTRL;
        msrs[n++].index = MSR_CORE_PERF_GLOBAL_CTRL;
        msrs[n++].index = MSR_CORE_PERF_GLOBAL_STATUS;
        msrs[n++].index = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
        for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
            msrs[n++].index = MSR_CORE_PERF_FIXED_CTR0 + i;
        }
        for (i = 0; i < num_architectural_pmu_counters; i++) {
            msrs[n++].index = MSR_P6_PERFCTR0 + i;
            msrs[n++].index = MSR_P6_EVNTSEL0 + i;
        }
    }

    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            msrs[n++].index = MSR_MC0_CTL + i;
        }
    }

    if (has_msr_hv_hypercall) {
        msrs[n++].index = HV_X64_MSR_HYPERCALL;
        msrs[n++].index = HV_X64_MSR_GUEST_OS_ID;
    }
    if (has_msr_hv_vapic) {
        msrs[n++].index = HV_X64_MSR_APIC_ASSIST_PAGE;
    }
    if (has_msr_hv_tsc) {
        msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
    }
    if (has_msr_hv_crash) {
        int j;

        for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
            msrs[n++].index = HV_X64_MSR_CRASH_P0 + j;
        }
    }
    if (has_msr_hv_runtime) {
        msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
    }
    if (has_msr_mtrr) {
        msrs[n++].index = MSR_MTRRdefType;
        msrs[n++].index = MSR_MTRRfix64K_00000;
        msrs[n++].index = MSR_MTRRfix16K_80000;
        msrs[n++].index = MSR_MTRRfix16K_A0000;
        msrs[n++].index = MSR_MTRRfix4K_C0000;
        msrs[n++].index = MSR_MTRRfix4K_C8000;
        msrs[n++].index = MSR_MTRRfix4K_D0000;
        msrs[n++].index = MSR_MTRRfix4K_D8000;
        msrs[n++].index = MSR_MTRRfix4K_E0000;
        msrs[n++].index = MSR_MTRRfix4K_E8000;
        msrs[n++].index = MSR_MTRRfix4K_F0000;
        msrs[n++].index = MSR_MTRRfix4K_F8000;
        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
            msrs[n++].index = MSR_MTRRphysBase(i);
            msrs[n++].index = MSR_MTRRphysMask(i);
        }
    }

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = n,
    };

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        }
    }

    return 0;
}
static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}
static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}
static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}
static int kvm_put_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;

    if (apic && kvm_irqchip_in_kernel()) {
        kvm_put_apic_state(apic, &kapic);

        return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_LAPIC, &kapic);
    }
    return 0;
}
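
/*
 * Exception, interrupt, NMI and SMI state is transferred through the
 * kvm_vcpu_events structure.  SMI/INIT remain latched in
 * cs->interrupt_request while the irqchip is in userspace and are handed
 * to the kernel otherwise.
 */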
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;
    events.exception.pad = 0;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
    events.nmi.pad = 0;

    events.sipi_vector = env->sipi_vector;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request.  */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        events.flags |= KVM_VCPUEVENT_VALID_SMM;
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}
static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}
static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}
static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

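/*
 * The ordering of the kvm_put_* calls below is deliberate: the feature
 * control MSR is restored first (the VMX-related state written later can
 * depend on it), old-style MCE injection must precede kvm_put_msrs(),
 * and the guest-debug workarounds run last because they may rewrite the
 * freshly written register state.
 */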
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE && has_msr_feature_control) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
        ret = kvm_put_apic(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

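/*
 * kvm_arch_pre_run() executes on the VCPU thread right before KVM_RUN,
 * without the iothread mutex held; that is why the NMI/SMI injection
 * paths and the userspace-APIC block below take and release the lock
 * explicitly around state other threads may touch.
 */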
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_irqchip_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}

MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}

int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}

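/*
 * Returning EXCP_HLT parks the halted VCPU in userspace until a hard
 * interrupt (with IF set) or an NMI is pending; returning 0 resumes the
 * guest immediately.
 */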
static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

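/*
 * x86 provides four address-match debug registers (DR0-DR3), so at most
 * four hardware breakpoints/watchpoints can be armed at a time; the
 * table below mirrors those slots.
 */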
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

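/*
 * Hardware watchpoints of 2, 4 or 8 bytes have to be naturally aligned
 * to fit a debug-register slot: the addr & (len - 1) test below rejects,
 * for example, a 4-byte watchpoint at address 0x1002.
 */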
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_injected == -1);

        /* pass to guest */
        env->exception_injected = arch_info->exception;
        env->has_error_code = 0;
    }

    return ret;
}

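/*
 * DR7 encoding used below: 0x0600 sets the GE bit and the always-one
 * reserved bit, (2 << (n * 2)) globally enables slot n, and each slot
 * has a 2-bit type field at bit 16 + n*4 plus a 2-bit length field at
 * bit 18 + n*4.  As a worked example, a 4-byte write watchpoint in
 * slot 0 produces
 *   0x0600 | (2 << 0) | (0x1 << 16) | (0x3 << 18) = 0x000d0602.
 */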
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

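/*
 * VMX basic exit reason 33 ("VM-entry failure due to invalid guest
 * state") with bit 31 set to mark the failed entry, as reported in
 * KVM_EXIT_FAIL_ENTRY's hardware_entry_failure_reason.
 */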
#define VMX_INVALID_GUEST_STATE 0x80000021

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode\n"
                    "which is not supported on less recent Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;
}

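/*
 * The assignment helpers below predate VFIO, which has since superseded
 * them for new device-passthrough code; they are kept to back the legacy
 * pci-assign device.
 */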
/* Classic KVM device assignment interface. Will remain x86 only. */
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}

int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}

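/*
 * KVM_CAP_ASSIGN_DEV_IRQ indicates the newer interface that splits host
 * (KVM_DEV_IRQ_HOST_*) and guest (KVM_DEV_IRQ_GUEST_*) IRQ modes into
 * separate flags; without it, only the original KVM_ASSIGN_IRQ ioctl is
 * available.
 */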
static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}

int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}

int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}

static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
                                     (use_host_msi ? KVM_DEV_IRQ_HOST_MSI
                                                   : KVM_DEV_IRQ_HOST_INTX));
}

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                   KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                     KVM_DEV_IRQ_HOST_MSI);
}

bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                   KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                     KVM_DEV_IRQ_HOST_MSIX);
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)