/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/utsname.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include "qemu-common.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "cpu.h"
#include "hyperv.h"

#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "hw/i386/apic-msidef.h"

#include "exec/ioport.h"
#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "migration/migration.h"
#include "exec/memattrs.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12
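
/* Fallback definitions: BUS_MCEERR_AR/AO are the siginfo si_code values the
 * kernel delivers with SIGBUS on hardware memory errors (action required /
 * action optional); older system headers may not define them. */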
#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_INFO(SET_TSS_ADDR),
    KVM_CAP_INFO(EXT_CPUID),
    KVM_CAP_INFO(MP_STATE),
    KVM_CAP_LAST_INFO
};

static bool has_msr_star;
static bool has_msr_hsave_pa;
static bool has_msr_tsc_aux;
static bool has_msr_tsc_adjust;
static bool has_msr_tsc_deadline;
static bool has_msr_feature_control;
static bool has_msr_async_pf_en;
static bool has_msr_pv_eoi_en;
static bool has_msr_misc_enable;
static bool has_msr_smbase;
static bool has_msr_bndcfgs;
static bool has_msr_kvm_steal_time;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
static bool has_msr_hv_tsc;
static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool has_msr_hv_runtime;
static bool has_msr_hv_synic;
static bool has_msr_hv_stimer;
static bool has_msr_mtrr;
static bool has_msr_xss;

static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;

static int has_xsave;
static int has_xcrs;
static int has_pit_state2;

int kvm_has_pit_state2(void)
{
    return has_pit_state2;
}

bool kvm_has_smm(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
}
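
/* Rerouting IRQ0 (e.g. for HPET legacy replacement) is only possible when
 * the routing can actually be changed: either the irqchip is emulated in
 * QEMU, or the in-kernel irqchip supports GSI routing. */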
bool kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

static int kvm_get_tsc(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    int ret;

    if (env->tsc_valid) {
        return 0;
    }

    msr_data.info.nmsrs = 1;
    msr_data.entries[0].index = MSR_IA32_TSC;
    env->tsc_valid = !runstate_is_running();

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
    if (ret < 0) {
        return ret;
    }

    env->tsc = msr_data.entries[0].data;
    return 0;
}

static inline void do_kvm_synchronize_tsc(void *arg)
{
    CPUState *cpu = arg;

    kvm_get_tsc(cpu);
}

void kvm_synchronize_all_tsc(void)
{
    CPUState *cpu;

    if (kvm_enabled()) {
        CPU_FOREACH(cpu) {
            run_on_cpu(cpu, do_kvm_synchronize_tsc, cpu);
        }
    }
}

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = g_malloc0(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            g_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 * for all entries.
 */
static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
{
    struct kvm_cpuid2 *cpuid;
    int max = 1;
    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
        max *= 2;
    }
    return cpuid;
}

static const struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
};

static int get_para_features(KVMState *s)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
        if (kvm_check_extension(s, para_features[i].cap)) {
            features |= (1 << para_features[i].feature);
        }
    }

    return features;
}

/* Returns the value for a specific register on the cpuid entry
 */
static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
{
    uint32_t ret = 0;
    switch (reg) {
    case R_EAX:
        ret = entry->eax;
        break;
    case R_EBX:
        ret = entry->ebx;
        break;
    case R_ECX:
        ret = entry->ecx;
        break;
    case R_EDX:
        ret = entry->edx;
        break;
    }
    return ret;
}

/* Find matching entry for function/index on kvm_cpuid2 struct
 */
static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
                                                 uint32_t function,
                                                 uint32_t index)
{
    int i;
    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            return &cpuid->entries[i];
        }
    }
    /* not found: */
    return NULL;
}

uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;
    bool found = false;

    cpuid = get_supported_cpuid(s);

    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
    if (entry) {
        found = true;
        ret = cpuid_entry_get_reg(entry, reg);
    }

    /* Fixups for the data returned by KVM, below */

    if (function == 1 && reg == R_EDX) {
        /* KVM before 2.6.30 misreports the following features */
        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
    } else if (function == 1 && reg == R_ECX) {
        /* We can set the hypervisor flag, even if KVM does not return it on
         * GET_SUPPORTED_CPUID
         */
        ret |= CPUID_EXT_HYPERVISOR;
        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
         * and the irqchip is in the kernel.
         */
        if (kvm_irqchip_in_kernel() &&
                kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
        }

        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
         * without the in-kernel irqchip
         */
        if (!kvm_irqchip_in_kernel()) {
            ret &= ~CPUID_EXT_X2APIC;
        }
    } else if (function == 6 && reg == R_EAX) {
        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
    } else if (function == 0x80000001 && reg == R_EDX) {
        /* On Intel, kvm returns cpuid according to the Intel spec,
         * so add missing bits according to the AMD spec:
         */
        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
    }

    g_free(cpuid);

    /* fallback for older kernels */
    if ((function == KVM_CPUID_FEATURES) && !found) {
        ret = get_para_features(s);
    }

    return ret;
}

typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);
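
/* Guest pages poisoned by a host hardware memory error are tracked on the
 * list above so they can be remapped with fresh memory on system reset;
 * kvm_unpoison_all() is registered as a reset handler in kvm_arch_init(). */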
static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_check_extension(s, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
    CPUX86State *env = &cpu->env;
    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
    uint64_t mcg_status = MCG_STATUS_MCIP;

    if (code == BUS_MCEERR_AR) {
        status |= MCI_STATUS_AR | 0x134;
        mcg_status |= MCG_STATUS_EIPV;
    } else {
        status |= 0xc0;
        mcg_status |= MCG_STATUS_RIPV;
    }
    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                       (MCM_ADDR_PHYS << 6) | 0xc,
                       cpu_x86_support_mca_broadcast(env) ?
                       MCE_INJECT_BROADCAST : 0);
}

static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}

int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    X86CPU *cpu = X86_CPU(c);
    CPUX86State *env = &cpu->env;
    ram_addr_t ram_addr;
    hwaddr paddr;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
        if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
            !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(cpu, paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    X86CPU *cpu = X86_CPU(first_cpu);

    if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        ram_addr_t ram_addr;
        hwaddr paddr;

        /* Hope we are lucky for AO MCE */
        if (qemu_ram_addr_from_host(addr, &ram_addr) == NULL ||
            !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
                                                addr, &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        kvm_hwpoison_page_add(ram_addr);
        kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
    } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}

static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
        unsigned int bank, bank_num = env->mcg_cap & 0xff;
        struct kvm_x86_mce mce;

        env->exception_injected = -1;

        /*
         * There must be at least one bank in use if an MCE is pending.
         * Find it and use its values for the event injection.
         */
        for (bank = 0; bank < bank_num; bank++) {
            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
                break;
            }
        }
        assert(bank < bank_num);

        mce.bank = bank;
        mce.status = env->mce_banks[bank * 4 + 1];
        mce.mcg_status = env->mcg_status;
        mce.addr = env->mce_banks[bank * 4 + 2];
        mce.misc = env->mce_banks[bank * 4 + 3];

        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
    }
    return 0;
}

static void cpu_update_state(void *opaque, int running, RunState state)
{
    CPUX86State *env = opaque;

    if (running) {
        env->tsc_valid = false;
    }
}

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    return cpu->apic_id;
}

#ifndef KVM_CPUID_SIGNATURE_NEXT
#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
#endif

static bool hyperv_hypercall_available(X86CPU *cpu)
{
    return cpu->hyperv_vapic ||
           (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
}

static bool hyperv_enabled(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
           (hyperv_hypercall_available(cpu) ||
            cpu->hyperv_time ||
            cpu->hyperv_relaxed_timing ||
            cpu->hyperv_crash ||
            cpu->hyperv_reset ||
            cpu->hyperv_vpindex ||
            cpu->hyperv_runtime ||
            cpu->hyperv_synic ||
            cpu->hyperv_stimer);
}

static Error *invtsc_mig_blocker;

#define KVM_MAX_CPUID_ENTRIES  100
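
/* kvm_arch_init_vcpu() assembles the guest's complete CPUID table in a
 * single fixed-size array: paravirtualized leaves (Hyper-V and/or KVM)
 * first, then the basic, extended, and Centaur leaves from the CPU model.
 * The table is handed to the kernel with KVM_SET_CPUID2; overflowing
 * KVM_MAX_CPUID_ENTRIES aborts. */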
int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
    } QEMU_PACKED cpuid_data;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
    uint32_t signature[3];
    int kvm_base = KVM_CPUID_SIGNATURE;
    int r;

    memset(&cpuid_data, 0, sizeof(cpuid_data));

    cpuid_i = 0;

    /* Paravirtualization CPUIDs */
    if (hyperv_enabled(cpu)) {
        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
        if (!cpu->hyperv_vendor_id) {
            memcpy(signature, "Microsoft Hv", 12);
        } else {
            size_t len = strlen(cpu->hyperv_vendor_id);

            if (len > 12) {
                error_report("hv-vendor-id truncated to 12 characters");
                len = 12;
            }
            memset(signature, 0, 12);
            memcpy(signature, cpu->hyperv_vendor_id, len);
        }
        c->eax = HYPERV_CPUID_MIN;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_INTERFACE;
        memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
        c->eax = signature[0];
        c->ebx = 0;
        c->ecx = 0;
        c->edx = 0;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_VERSION;
        c->eax = 0x00001bbc;
        c->ebx = 0x00060001;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_FEATURES;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
        }
        if (cpu->hyperv_vapic) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
            has_msr_hv_vapic = true;
        }
        if (cpu->hyperv_time &&
            kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
            c->eax |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
            c->eax |= 0x200;
            has_msr_hv_tsc = true;
        }
        if (cpu->hyperv_crash && has_msr_hv_crash) {
            c->edx |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
        }
        if (cpu->hyperv_reset && has_msr_hv_reset) {
            c->eax |= HV_X64_MSR_RESET_AVAILABLE;
        }
        if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
            c->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
        }
        if (cpu->hyperv_runtime && has_msr_hv_runtime) {
            c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
        }
        if (cpu->hyperv_synic) {
            int sint;

            if (!has_msr_hv_synic ||
                kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
                fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
                return -ENOSYS;
            }

            c->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
            env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
            for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
                env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
            }
        }
        if (cpu->hyperv_stimer) {
            if (!has_msr_hv_stimer) {
                fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
                return -ENOSYS;
            }
            c->eax |= HV_X64_MSR_SYNTIMER_AVAILABLE;
        }

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
        if (cpu->hyperv_relaxed_timing) {
            c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
        }
        if (has_msr_hv_vapic) {
            c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
        }
        c->ebx = cpu->hyperv_spinlock_attempts;

        c = &cpuid_data.entries[cpuid_i++];
        c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
        c->eax = 0x40;
        c->ebx = 0x40;

        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
        has_msr_hv_hypercall = true;
    }

    if (cpu->expose_kvm) {
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_SIGNATURE | kvm_base;
        c->eax = KVM_CPUID_FEATURES | kvm_base;
        c->ebx = signature[0];
        c->ecx = signature[1];
        c->edx = signature[2];

        c = &cpuid_data.entries[cpuid_i++];
        c->function = KVM_CPUID_FEATURES | kvm_base;
        c->eax = env->features[FEAT_KVM];

        has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);

        has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI);

        has_msr_kvm_steal_time = c->eax & (1 << KVM_FEATURE_STEAL_TIME);
    }

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                if (i == 0xd && j == 64) {
                    break;
                }
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0) {
                    break;
                }
                if (i == 0xb && !(c->ecx & 0xff00)) {
                    break;
                }
                if (i == 0xd && c->eax == 0) {
                    continue;
                }
                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                    fprintf(stderr, "cpuid_data is full, no space for "
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
                    abort();
                }
                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }

    if (limit >= 0x0a) {
        uint32_t ver;

        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
        if ((ver & 0xff) > 0) {
            has_msr_architectural_pmu = true;
            num_architectural_pmu_counters = (ver & 0xff00) >> 8;

            /* Shouldn't be more than 32, since that's the number of bits
             * available in EBX to tell us _which_ counters are available.
             * Play it safe.
             */
            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
                num_architectural_pmu_counters = MAX_GP_COUNTERS;
            }
        }
    }

    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
            abort();
        }
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    /* Call Centaur's CPUID instructions if they are supported. */
    if (env->cpuid_xlevel2 > 0) {
        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);

        for (i = 0xC0000000; i <= limit; i++) {
            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
                abort();
            }
            c = &cpuid_data.entries[cpuid_i++];

            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
        }
    }

    cpuid_data.cpuid.nent = cpuid_i;

    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap, unsupported_caps;
        int banks;
        int ret;

        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
        if (ret < 0) {
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
            return ret;
        }

        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
            return -ENOTSUP;
        }

        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
        if (unsupported_caps) {
            error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
                         unsupported_caps);
        }

        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
        if (ret < 0) {
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
            return ret;
        }
    }

    qemu_add_vm_change_state_handler(cpu_update_state, env);

    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
    if (c) {
        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
                                  !!(c->ecx & CPUID_EXT_SMX);
    }

    c = cpuid_find_entry(&cpuid_data.cpuid, 0x80000007, 0);
    if (c && (c->edx & 1<<8) && invtsc_mig_blocker == NULL) {
        /* for migration */
        error_setg(&invtsc_mig_blocker,
                   "State blocked by non-migratable CPU device"
                   " (invtsc flag)");
        migrate_add_blocker(invtsc_mig_blocker);
        /* for savevm */
        vmstate_x86_cpu.unmigratable = 1;
    }

    cpuid_data.cpuid.padding = 0;
    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
    if (r) {
        return r;
    }

    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL);
    if (r && env->tsc_khz) {
        r = kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz);
        if (r < 0) {
            fprintf(stderr, "KVM_SET_TSC_KHZ failed\n");
            return r;
        }
    }

    if (has_xsave) {
        env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
    }

    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
        has_msr_mtrr = true;
    }

    return 0;
}

void kvm_arch_reset_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}

void kvm_arch_do_init_vcpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    /* APs get directly into wait-for-SIPI state.  */
    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
    }
}

static int kvm_get_supported_msrs(KVMState *s)
{
    static int kvm_supported_msrs;
    int ret = 0;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return ret;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
                                           msr_list.nmsrs *
                                           sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
                    has_msr_tsc_aux = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
                    has_msr_tsc_adjust = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
                    has_msr_tsc_deadline = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
                    has_msr_smbase = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
                    has_msr_misc_enable = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
                    has_msr_bndcfgs = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
                    has_msr_xss = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
                    has_msr_hv_crash = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
                    has_msr_hv_reset = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
                    has_msr_hv_vpindex = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
                    has_msr_hv_runtime = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
                    has_msr_hv_synic = true;
                    continue;
                }
                if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) {
                    has_msr_hv_stimer = true;
                    continue;
                }
            }
        }

        g_free(kvm_msr_list);
    }

    return ret;
}

static Notifier smram_machine_done;
static KVMMemoryListener smram_listener;
static AddressSpace smram_address_space;
static MemoryRegion smram_as_root;
static MemoryRegion smram_as_mem;
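
/* SMRAM is exposed to KVM as a second memory slot address space (id 1):
 * it mirrors normal system memory at low priority and overlays the SMRAM
 * region on top, so vCPUs running in SMM see SMRAM while everything else
 * sees ordinary RAM. */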
static void register_smram_listener(Notifier *n, void *unused)
{
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    /* Outer container... */
    memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
    memory_region_set_enabled(&smram_as_root, true);

    /* ... with two regions inside: normal system memory with low
     * priority, and...
     */
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
                             get_system_memory(), 0, ~0ull);
    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
    memory_region_set_enabled(&smram_as_mem, true);

    if (smram) {
        /* ... SMRAM with higher priority */
        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
        memory_region_set_enabled(smram, true);
    }

    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1);
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    uint64_t identity_base = 0xfffbc000;
    uint64_t shadow_mem;
    int ret;
    struct utsname utsname;

#ifdef KVM_CAP_XSAVE
    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

    ret = kvm_get_supported_msrs(s);
    if (ret < 0) {
        return ret;
    }

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    /*
     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
     * Since these must be part of guest physical memory, we need to allocate
     * them, both by setting their start addresses in the kernel and by
     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
     *
     * Older KVM versions may not support setting the identity map base. In
     * that case we need to stick with the default, i.e. a 256K maximum BIOS
     * size.
     */
    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        /* Allows up to 16M BIOSes. */
        identity_base = 0xfeffc000;

        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
        if (ret < 0) {
            return ret;
        }
    }

    /* Set TSS base one page after EPT identity map. */
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
    if (ret < 0) {
        return ret;
    }

    /* Tell fw_cfg to notify the BIOS to reserve the range. */
    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
    if (ret < 0) {
        fprintf(stderr, "e820_add_entry() table is full\n");
        return ret;
    }
    qemu_register_reset(kvm_unpoison_all, NULL);

    shadow_mem = machine_kvm_shadow_mem(ms);
    if (shadow_mem != -1) {
        shadow_mem /= 4096;
        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
        if (ret < 0) {
            return ret;
        }
    }

    if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
        smram_machine_done.notify = register_smram_listener;
        qemu_add_machine_init_done_notifier(&smram_machine_done);
    }
    return 0;
}

static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = !lhs->present;
    lhs->padding = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    if (rhs->unusable) {
        lhs->flags = 0;
    } else {
        lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
                     (rhs->present * DESC_P_MASK) |
                     (rhs->dpl << DESC_DPL_SHIFT) |
                     (rhs->db << DESC_B_SHIFT) |
                     (rhs->s * DESC_S_MASK) |
                     (rhs->l << DESC_L_SHIFT) |
                     (rhs->g * DESC_G_MASK) |
                     (rhs->avl * DESC_AVL_MASK);
    }
}

static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set) {
        *kvm_reg = *qemu_reg;
    } else {
        *qemu_reg = *kvm_reg;
    }
}

static int kvm_getput_regs(X86CPU *cpu, int set)
{
    CPUX86State *env = &cpu->env;
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
        if (ret < 0) {
            return ret;
        }
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
    }

    return ret;
}

static int kvm_put_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    fpu.last_opcode = env->fpop;
    fpu.last_ip = env->fpip;
    fpu.last_dp = env->fpdp;
    for (i = 0; i < 8; ++i) {
        fpu.ftwx |= (!env->fptags[i]) << i;
    }
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].XMM_Q(0));
        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].XMM_Q(1));
    }
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
}
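
/* The constants below index the uint32_t region[] array of struct kvm_xsave,
 * i.e. they are offsets in 4-byte units. They follow the hardware XSAVE area
 * layout: the legacy FXSAVE image first (FCW/FSW at byte 0, XMM space at
 * byte 160), the XSTATE_BV header field at byte 512, then the extended state
 * components (AVX high halves, MPX, AVX-512) at their architectural offsets. */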
#define XSAVE_FCW_FSW     0
#define XSAVE_FTW_FOP     1
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#define XSAVE_BNDREGS     240
#define XSAVE_BNDCSR      256
#define XSAVE_OPMASK      272
#define XSAVE_ZMM_Hi256   288
#define XSAVE_Hi16_ZMM    416

static int kvm_put_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xsave* xsave = env->kvm_xsave_buf;
    uint16_t cwd, swd, twd;
    uint8_t *xmm, *ymmh, *zmmh;
    int i, r;

    if (!has_xsave) {
        return kvm_put_fpu(cpu);
    }

    memset(xsave, 0, sizeof(struct kvm_xsave));
    twd = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i) {
        twd |= (!env->fptags[i]) << i;
    }
    xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd;
    xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd;
    memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip));
    memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp));
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_BNDREGS], env->bnd_regs,
            sizeof env->bnd_regs);
    memcpy(&xsave->region[XSAVE_BNDCSR], &env->bndcs_regs,
            sizeof(env->bndcs_regs));
    memcpy(&xsave->region[XSAVE_OPMASK], env->opmask_regs,
            sizeof env->opmask_regs);

    xmm = (uint8_t *)&xsave->region[XSAVE_XMM_SPACE];
    ymmh = (uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
    zmmh = (uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
    for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
        stq_p(xmm,     env->xmm_regs[i].XMM_Q(0));
        stq_p(xmm+8,   env->xmm_regs[i].XMM_Q(1));
        stq_p(ymmh,    env->xmm_regs[i].XMM_Q(2));
        stq_p(ymmh+8,  env->xmm_regs[i].XMM_Q(3));
        stq_p(zmmh,    env->xmm_regs[i].XMM_Q(4));
        stq_p(zmmh+8,  env->xmm_regs[i].XMM_Q(5));
        stq_p(zmmh+16, env->xmm_regs[i].XMM_Q(6));
        stq_p(zmmh+24, env->xmm_regs[i].XMM_Q(7));
    }

#ifdef TARGET_X86_64
    memcpy(&xsave->region[XSAVE_Hi16_ZMM], &env->xmm_regs[16],
            16 * sizeof env->xmm_regs[16]);
#endif
    r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
    return r;
}

static int kvm_put_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xcrs xcrs = {};

    if (!has_xcrs) {
        return 0;
    }

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
}

static int kvm_put_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;
    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->reserved = 0;
    entry->data = value;
}

static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;

    if (!has_msr_tsc_deadline) {
        return 0;
    }

    kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = 1,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}

/*
 * Provide a separate write service for the feature control MSR in order to
 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
 * before writing any other state because forcibly leaving nested mode
 * invalidates the VCPU state.
 */
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entry;
    } msr_data;

    kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
                      cpu->env.msr_ia32_feature_control);

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = 1,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}

static int kvm_put_msrs(X86CPU *cpu, int level)
{
    CPUX86State *env = &cpu->env;
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[150];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0, i;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
    if (has_msr_star) {
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    }
    if (has_msr_hsave_pa) {
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
    }
    if (has_msr_tsc_aux) {
        kvm_msr_entry_set(&msrs[n++], MSR_TSC_AUX, env->tsc_aux);
    }
    if (has_msr_tsc_adjust) {
        kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
    }
    if (has_msr_misc_enable) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
                          env->msr_ia32_misc_enable);
    }
    if (has_msr_smbase) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_SMBASE, env->smbase);
    }
    if (has_msr_bndcfgs) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
    }
    if (has_msr_xss) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_XSS, env->xss);
    }
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
        kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
        kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif
    /*
     * The following MSRs have side effects on the guest or are too heavy
     * for normal writeback. Limit them to reset or full state updates.
     */
    if (level >= KVM_PUT_RESET_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
        if (has_msr_async_pf_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
                              env->async_pf_en_msr);
        }
        if (has_msr_pv_eoi_en) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_PV_EOI_EN,
                              env->pv_eoi_en_msr);
        }
        if (has_msr_kvm_steal_time) {
            kvm_msr_entry_set(&msrs[n++], MSR_KVM_STEAL_TIME,
                              env->steal_time_msr);
        }
        if (has_msr_architectural_pmu) {
            /* Stop the counter.  */
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL, 0);

            /* Set the counter values.  */
            for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
                kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR0 + i,
                                  env->msr_fixed_counters[i]);
            }
            for (i = 0; i < num_architectural_pmu_counters; i++) {
                kvm_msr_entry_set(&msrs[n++], MSR_P6_PERFCTR0 + i,
                                  env->msr_gp_counters[i]);
                kvm_msr_entry_set(&msrs[n++], MSR_P6_EVNTSEL0 + i,
                                  env->msr_gp_evtsel[i]);
            }
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_STATUS,
                              env->msr_global_status);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_OVF_CTRL,
                              env->msr_global_ovf_ctrl);

            /* Now start the PMU.  */
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL,
                              env->msr_fixed_ctr_ctrl);
            kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
                              env->msr_global_ctrl);
        }
        if (has_msr_hv_hypercall) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID,
                              env->msr_hv_guest_os_id);
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL,
                              env->msr_hv_hypercall);
        }
        if (has_msr_hv_vapic) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE,
                              env->msr_hv_vapic);
        }
        if (has_msr_hv_tsc) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
                              env->msr_hv_tsc);
        }
        if (has_msr_hv_crash) {
            int j;

            for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_P0 + j,
                                  env->msr_hv_crash_params[j]);

            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_CTL,
                              HV_X64_MSR_CRASH_CTL_NOTIFY);
        }
        if (has_msr_hv_runtime) {
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
                              env->msr_hv_runtime);
        }
        if (cpu->hyperv_synic) {
            int j;

            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SCONTROL,
                              env->msr_hv_synic_control);
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SVERSION,
                              env->msr_hv_synic_version);
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIEFP,
                              env->msr_hv_synic_evt_page);
            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIMP,
                              env->msr_hv_synic_msg_page);

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SINT0 + j,
                                  env->msr_hv_synic_sint[j]);
            }
        }
        if (has_msr_hv_stimer) {
            int j;

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_STIMER0_CONFIG + j*2,
                                  env->msr_hv_stimer_config[j]);
            }

            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_STIMER0_COUNT + j*2,
                                  env->msr_hv_stimer_count[j]);
            }
        }
        if (has_msr_mtrr) {
            kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
            kvm_msr_entry_set(&msrs[n++],
                              MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
                kvm_msr_entry_set(&msrs[n++],
                                  MSR_MTRRphysBase(i), env->mtrr_var[i].base);
                kvm_msr_entry_set(&msrs[n++],
                                  MSR_MTRRphysMask(i), env->mtrr_var[i].mask);
            }
        }
    }

    /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
     *       kvm_put_msr_feature_control. */

    if (env->mcg_cap) {
        int i;

        kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
            kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
        }
    }

    msr_data.info = (struct kvm_msrs) {
        .nmsrs = n,
    };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
}

static int kvm_get_fpu(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
    if (ret < 0) {
        return ret;
    }

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    env->fpop = fpu.last_opcode;
    env->fpip = fpu.last_ip;
    env->fpdp = fpu.last_dp;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    }
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    for (i = 0; i < CPU_NB_REGS; i++) {
        env->xmm_regs[i].XMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
        env->xmm_regs[i].XMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
    }
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_xsave(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_xsave* xsave = env->kvm_xsave_buf;
    int ret, i;
    const uint8_t *xmm, *ymmh, *zmmh;
    uint16_t cwd, swd, twd;

    if (!has_xsave) {
        return kvm_get_fpu(cpu);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        return ret;
    }

    cwd = (uint16_t)xsave->region[XSAVE_FCW_FSW];
    swd = (uint16_t)(xsave->region[XSAVE_FCW_FSW] >> 16);
    twd = (uint16_t)xsave->region[XSAVE_FTW_FOP];
    env->fpop = (uint16_t)(xsave->region[XSAVE_FTW_FOP] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((twd >> i) & 1);
    }
    memcpy(&env->fpip, &xsave->region[XSAVE_CWD_RIP], sizeof(env->fpip));
    memcpy(&env->fpdp, &xsave->region[XSAVE_CWD_RDP], sizeof(env->fpdp));
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->bnd_regs, &xsave->region[XSAVE_BNDREGS],
            sizeof env->bnd_regs);
    memcpy(&env->bndcs_regs, &xsave->region[XSAVE_BNDCSR],
            sizeof(env->bndcs_regs));
    memcpy(env->opmask_regs, &xsave->region[XSAVE_OPMASK],
            sizeof env->opmask_regs);

    xmm = (const uint8_t *)&xsave->region[XSAVE_XMM_SPACE];
    ymmh = (const uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
    zmmh = (const uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
    for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
        env->xmm_regs[i].XMM_Q(0) = ldq_p(xmm);
        env->xmm_regs[i].XMM_Q(1) = ldq_p(xmm+8);
        env->xmm_regs[i].XMM_Q(2) = ldq_p(ymmh);
        env->xmm_regs[i].XMM_Q(3) = ldq_p(ymmh+8);
        env->xmm_regs[i].XMM_Q(4) = ldq_p(zmmh);
        env->xmm_regs[i].XMM_Q(5) = ldq_p(zmmh+8);
        env->xmm_regs[i].XMM_Q(6) = ldq_p(zmmh+16);
        env->xmm_regs[i].XMM_Q(7) = ldq_p(zmmh+24);
    }

#ifdef TARGET_X86_64
    memcpy(&env->xmm_regs[16], &xsave->region[XSAVE_Hi16_ZMM],
            16 * sizeof env->xmm_regs[16]);
#endif
    return 0;
}

static int kvm_get_xcrs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!has_xcrs) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < xcrs.nr_xcrs; i++) {
        /* Only support xcr0 now */
        if (xcrs.xcrs[i].xcr == 0) {
            env->xcr0 = xcrs.xcrs[i].value;
            break;
        }
    }
    return 0;
}

static int kvm_get_sregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    env->efer = sregs.efer;

    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */

#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
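
    /* Recompute the hflags bits that are derived from the sregs just read;
     * HFLAG_COPY_MASK preserves the remaining, unrelated bits. */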
    hflags = (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
                (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}
1847 static int kvm_get_msrs(X86CPU *cpu)
1849 CPUX86State *env = &cpu->env;
1850 struct {
1851 struct kvm_msrs info;
1852 struct kvm_msr_entry entries[150];
1853 } msr_data;
1854 struct kvm_msr_entry *msrs = msr_data.entries;
1855 int ret, i, n;
1857 n = 0;
1858 msrs[n++].index = MSR_IA32_SYSENTER_CS;
1859 msrs[n++].index = MSR_IA32_SYSENTER_ESP;
1860 msrs[n++].index = MSR_IA32_SYSENTER_EIP;
1861 msrs[n++].index = MSR_PAT;
1862 if (has_msr_star) {
1863 msrs[n++].index = MSR_STAR;
1865 if (has_msr_hsave_pa) {
1866 msrs[n++].index = MSR_VM_HSAVE_PA;
1868 if (has_msr_tsc_aux) {
1869 msrs[n++].index = MSR_TSC_AUX;
1871 if (has_msr_tsc_adjust) {
1872 msrs[n++].index = MSR_TSC_ADJUST;
1874 if (has_msr_tsc_deadline) {
1875 msrs[n++].index = MSR_IA32_TSCDEADLINE;
1877 if (has_msr_misc_enable) {
1878 msrs[n++].index = MSR_IA32_MISC_ENABLE;
1880 if (has_msr_smbase) {
1881 msrs[n++].index = MSR_IA32_SMBASE;
1883 if (has_msr_feature_control) {
1884 msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
1886 if (has_msr_bndcfgs) {
1887 msrs[n++].index = MSR_IA32_BNDCFGS;
1889 if (has_msr_xss) {
1890 msrs[n++].index = MSR_IA32_XSS;
1894 if (!env->tsc_valid) {
1895 msrs[n++].index = MSR_IA32_TSC;
1896 env->tsc_valid = !runstate_is_running();
1899 #ifdef TARGET_X86_64
1900 if (lm_capable_kernel) {
1901 msrs[n++].index = MSR_CSTAR;
1902 msrs[n++].index = MSR_KERNELGSBASE;
1903 msrs[n++].index = MSR_FMASK;
1904 msrs[n++].index = MSR_LSTAR;
1906 #endif
1907 msrs[n++].index = MSR_KVM_SYSTEM_TIME;
1908 msrs[n++].index = MSR_KVM_WALL_CLOCK;
1909 if (has_msr_async_pf_en) {
1910 msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
1912 if (has_msr_pv_eoi_en) {
1913 msrs[n++].index = MSR_KVM_PV_EOI_EN;
1915 if (has_msr_kvm_steal_time) {
1916 msrs[n++].index = MSR_KVM_STEAL_TIME;
1918 if (has_msr_architectural_pmu) {
1919 msrs[n++].index = MSR_CORE_PERF_FIXED_CTR_CTRL;
1920 msrs[n++].index = MSR_CORE_PERF_GLOBAL_CTRL;
1921 msrs[n++].index = MSR_CORE_PERF_GLOBAL_STATUS;
1922 msrs[n++].index = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
1923 for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
1924 msrs[n++].index = MSR_CORE_PERF_FIXED_CTR0 + i;
1926 for (i = 0; i < num_architectural_pmu_counters; i++) {
1927 msrs[n++].index = MSR_P6_PERFCTR0 + i;
1928 msrs[n++].index = MSR_P6_EVNTSEL0 + i;
1932 if (env->mcg_cap) {
1933 msrs[n++].index = MSR_MCG_STATUS;
1934 msrs[n++].index = MSR_MCG_CTL;
1935 for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
1936 msrs[n++].index = MSR_MC0_CTL + i;
1940 if (has_msr_hv_hypercall) {
1941 msrs[n++].index = HV_X64_MSR_HYPERCALL;
1942 msrs[n++].index = HV_X64_MSR_GUEST_OS_ID;
1944 if (has_msr_hv_vapic) {
1945 msrs[n++].index = HV_X64_MSR_APIC_ASSIST_PAGE;
1947 if (has_msr_hv_tsc) {
1948 msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
1950 if (has_msr_hv_crash) {
1951 int j;
1953 for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
1954 msrs[n++].index = HV_X64_MSR_CRASH_P0 + j;
1957 if (has_msr_hv_runtime) {
1958 msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
1960 if (cpu->hyperv_synic) {
1961 uint32_t msr;
1963 msrs[n++].index = HV_X64_MSR_SCONTROL;
1964 msrs[n++].index = HV_X64_MSR_SVERSION;
1965 msrs[n++].index = HV_X64_MSR_SIEFP;
1966 msrs[n++].index = HV_X64_MSR_SIMP;
1967 for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
1968 msrs[n++].index = msr;
1971 if (has_msr_hv_stimer) {
1972 uint32_t msr;
1974 for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
1975 msr++) {
1976 msrs[n++].index = msr;
1979 if (has_msr_mtrr) {
1980 msrs[n++].index = MSR_MTRRdefType;
1981 msrs[n++].index = MSR_MTRRfix64K_00000;
1982 msrs[n++].index = MSR_MTRRfix16K_80000;
1983 msrs[n++].index = MSR_MTRRfix16K_A0000;
1984 msrs[n++].index = MSR_MTRRfix4K_C0000;
1985 msrs[n++].index = MSR_MTRRfix4K_C8000;
1986 msrs[n++].index = MSR_MTRRfix4K_D0000;
1987 msrs[n++].index = MSR_MTRRfix4K_D8000;
1988 msrs[n++].index = MSR_MTRRfix4K_E0000;
1989 msrs[n++].index = MSR_MTRRfix4K_E8000;
1990 msrs[n++].index = MSR_MTRRfix4K_F0000;
1991 msrs[n++].index = MSR_MTRRfix4K_F8000;
1992 for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
1993 msrs[n++].index = MSR_MTRRphysBase(i);
1994 msrs[n++].index = MSR_MTRRphysMask(i);
1998 msr_data.info = (struct kvm_msrs) {
1999 .nmsrs = n,
2002 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
2003 if (ret < 0) {
2004 return ret;
    for (i = 0; i < ret; i++) {
        uint32_t index = msrs[i].index;
        switch (index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_PAT:
            env->pat = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_TSC_AUX:
            env->tsc_aux = msrs[i].data;
            break;
        case MSR_TSC_ADJUST:
            env->tsc_adjust = msrs[i].data;
            break;
        case MSR_IA32_TSCDEADLINE:
            env->tsc_deadline = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
        case MSR_IA32_MISC_ENABLE:
            env->msr_ia32_misc_enable = msrs[i].data;
            break;
        case MSR_IA32_SMBASE:
            env->smbase = msrs[i].data;
            break;
        case MSR_IA32_FEATURE_CONTROL:
            env->msr_ia32_feature_control = msrs[i].data;
            break;
        case MSR_IA32_BNDCFGS:
            env->msr_bndcfgs = msrs[i].data;
            break;
        case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
        default:
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
            break;
        case MSR_KVM_ASYNC_PF_EN:
            env->async_pf_en_msr = msrs[i].data;
            break;
        case MSR_KVM_PV_EOI_EN:
            env->pv_eoi_en_msr = msrs[i].data;
            break;
        case MSR_KVM_STEAL_TIME:
            env->steal_time_msr = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
            env->msr_fixed_ctr_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_CTRL:
            env->msr_global_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
            env->msr_global_status = msrs[i].data;
            break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
            env->msr_global_ovf_ctrl = msrs[i].data;
            break;
        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
            break;
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
            break;
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
            break;
        case HV_X64_MSR_HYPERCALL:
            env->msr_hv_hypercall = msrs[i].data;
            break;
        case HV_X64_MSR_GUEST_OS_ID:
            env->msr_hv_guest_os_id = msrs[i].data;
            break;
        case HV_X64_MSR_APIC_ASSIST_PAGE:
            env->msr_hv_vapic = msrs[i].data;
            break;
        case HV_X64_MSR_REFERENCE_TSC:
            env->msr_hv_tsc = msrs[i].data;
            break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
            break;
        case HV_X64_MSR_VP_RUNTIME:
            env->msr_hv_runtime = msrs[i].data;
            break;
        case HV_X64_MSR_SCONTROL:
            env->msr_hv_synic_control = msrs[i].data;
            break;
        case HV_X64_MSR_SVERSION:
            env->msr_hv_synic_version = msrs[i].data;
            break;
        case HV_X64_MSR_SIEFP:
            env->msr_hv_synic_evt_page = msrs[i].data;
            break;
        case HV_X64_MSR_SIMP:
            env->msr_hv_synic_msg_page = msrs[i].data;
            break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG:
            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
                                msrs[i].data;
            break;
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT:
            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
                                msrs[i].data;
            break;
        case MSR_MTRRdefType:
            env->mtrr_deftype = msrs[i].data;
            break;
        case MSR_MTRRfix64K_00000:
            env->mtrr_fixed[0] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_80000:
            env->mtrr_fixed[1] = msrs[i].data;
            break;
        case MSR_MTRRfix16K_A0000:
            env->mtrr_fixed[2] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C0000:
            env->mtrr_fixed[3] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_C8000:
            env->mtrr_fixed[4] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D0000:
            env->mtrr_fixed[5] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_D8000:
            env->mtrr_fixed[6] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E0000:
            env->mtrr_fixed[7] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_E8000:
            env->mtrr_fixed[8] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F0000:
            env->mtrr_fixed[9] = msrs[i].data;
            break;
        case MSR_MTRRfix4K_F8000:
            env->mtrr_fixed[10] = msrs[i].data;
            break;
        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
            if (index & 1) {
                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data;
            } else {
                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
            }
            break;
        }
    }

    return 0;
}

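/* Push QEMU's notion of the vCPU's multiprocessing state (runnable,
 * halted, ...) into the kernel. */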
static int kvm_put_mp_state(X86CPU *cpu)
{
    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}

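/* Read the multiprocessing state back from the kernel; with an in-kernel
 * irqchip, a HALTED state also updates cs->halted. */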
static int kvm_get_mp_state(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    if (kvm_irqchip_in_kernel()) {
        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
    }
    return 0;
}

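/* Fetch the in-kernel local APIC registers into QEMU's APIC device
 * model; a no-op for a userspace irqchip. */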
static int kvm_get_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;
    int ret;

    if (apic && kvm_irqchip_in_kernel()) {
        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
        if (ret < 0) {
            return ret;
        }

        kvm_get_apic_state(apic, &kapic);
    }
    return 0;
}

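/* Write QEMU's APIC device model state into the in-kernel local APIC. */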
static int kvm_put_apic(X86CPU *cpu)
{
    DeviceState *apic = cpu->apic_state;
    struct kvm_lapic_state kapic;

    if (apic && kvm_irqchip_in_kernel()) {
        kvm_put_apic_state(apic, &kapic);

        return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_LAPIC, &kapic);
    }
    return 0;
}

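/* Transfer pending exception/interrupt/NMI/SMI state to the kernel via
 * KVM_SET_VCPU_EVENTS. */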
static int kvm_put_vcpu_events(X86CPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events = {};

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;
    events.exception.pad = 0;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
    events.nmi.pad = 0;

    events.sipi_vector = env->sipi_vector;

    /* Clear the flags word before any valid bits are set, so that the
     * KVM_VCPUEVENT_VALID_SMM flag set below is not wiped out again. */
    events.flags = 0;

    if (has_msr_smbase) {
        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
        if (kvm_irqchip_in_kernel()) {
            /* As soon as these are moved to the kernel, remove them
             * from cs->interrupt_request.
             */
            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
        } else {
            /* Keep these in cs->interrupt_request. */
            events.smi.pending = 0;
            events.smi.latched_init = 0;
        }
        events.flags |= KVM_VCPUEVENT_VALID_SMM;
    }

    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
}

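/* Retrieve pending event state from the kernel and fold it back into
 * env, including NMI masking and the SMM-related hflags. */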
static int kvm_get_vcpu_events(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
        if (events.smi.smm) {
            env->hflags |= HF_SMM_MASK;
        } else {
            env->hflags &= ~HF_SMM_MASK;
        }
        if (events.smi.pending) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
        }
        if (events.smi.smm_inside_nmi) {
            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
        } else {
            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
        }
        if (events.smi.latched_init) {
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        } else {
            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
        }
    }

    env->sipi_vector = events.sipi_vector;

    return 0;
}

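/* Compensate for kernels lacking SET_VCPU_EVENTS or robust single-step
 * support by reinjecting debug traps through SET_GUEST_DEBUG. */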
static int kvm_guest_debug_workarounds(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
        ret = kvm_update_guest_debug(cs, reinject_trap);
    }
    return ret;
}

static int kvm_put_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
}

static int kvm_get_debugregs(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;

    return 0;
}

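/* Write all architectural state QEMU tracks into the kernel. The order
 * of the calls below matters; see the inline comments. */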
int kvm_arch_put_registers(CPUState *cpu, int level)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int ret;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    if (level >= KVM_PUT_RESET_STATE && has_msr_feature_control) {
        ret = kvm_put_msr_feature_control(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_getput_regs(x86_cpu, 1);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xsave(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_xcrs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_sregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be before kvm_put_msrs */
    ret = kvm_inject_mce_oldstyle(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_msrs(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(x86_cpu);
        if (ret < 0) {
            return ret;
        }
        ret = kvm_put_apic(x86_cpu);
        if (ret < 0) {
            return ret;
        }
    }

    ret = kvm_put_tscdeadline_msr(x86_cpu);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_put_vcpu_events(x86_cpu, level);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_put_debugregs(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    /* must be last */
    ret = kvm_guest_debug_workarounds(x86_cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

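/* Counterpart of kvm_arch_put_registers: read all architectural state
 * back from the kernel into QEMU's CPU structures. */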
int kvm_arch_get_registers(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    int ret;

    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));

    ret = kvm_getput_regs(cpu, 0);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xsave(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_xcrs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_sregs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_msrs(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_mp_state(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_apic(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_vcpu_events(cpu);
    if (ret < 0) {
        return ret;
    }
    ret = kvm_get_debugregs(cpu);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

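/* Called just before entering the guest, without the iothread mutex held
 * (it is taken internally where needed): inject pending NMI/SMI, feed
 * PIC interrupts to a userspace irqchip and request interrupt-window
 * exits as necessary. */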
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret;

    /* Inject NMI */
    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected NMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
                        strerror(-ret));
            }
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            qemu_mutex_lock_iothread();
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
            qemu_mutex_unlock_iothread();
            DPRINTF("injected SMI\n");
            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
            if (ret < 0) {
                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
                        strerror(-ret));
            }
        }
    }

    if (!kvm_pic_in_kernel()) {
        qemu_mutex_lock_iothread();
    }

    /* Force the VCPU out of its inner loop to process any INIT requests
     * or (for userspace APIC, but it is cheap to combine the checks here)
     * pending TPR access reports.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    if (!kvm_pic_in_kernel()) {
        /* Try to inject an interrupt if the guest can accept it */
        if (run->ready_for_interrupt_injection &&
            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) {
            int irq;

            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                struct kvm_interrupt intr;

                intr.irq = irq;
                DPRINTF("injected interrupt %d\n", irq);
                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
                if (ret < 0) {
                    fprintf(stderr,
                            "KVM: injection failed, interrupt lost (%s)\n",
                            strerror(-ret));
                }
            }
        }

        /* If we have an interrupt but the guest is not ready to receive an
         * interrupt, request an interrupt window exit.  This will
         * cause a return to userspace as soon as the guest is ready to
         * receive interrupts. */
        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
        }

        DPRINTF("setting tpr\n");
        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);

        qemu_mutex_unlock_iothread();
    }
}

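/* Called right after KVM_RUN returns: fold the run structure's SMM and
 * IF state back into env and synchronize TPR and APIC base. */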
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (run->flags & KVM_RUN_X86_SMM) {
        env->hflags |= HF_SMM_MASK;
    } else {
        env->hflags &= ~HF_SMM_MASK;
    }
    if (run->if_flag) {
        env->eflags |= IF_MASK;
    } else {
        env->eflags &= ~IF_MASK;
    }

    /* We need to protect the apic state against concurrent accesses from
     * different threads in case the userspace irqchip is used. */
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_lock_iothread();
    }
    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
    if (!kvm_irqchip_in_kernel()) {
        qemu_mutex_unlock_iothread();
    }
    return cpu_get_mem_attrs(env);
}

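/* Handle events that were signalled while the vCPU was outside KVM_RUN
 * (machine checks, INIT, SIPI, TPR accesses, ...); returns non-zero if
 * the vCPU should stay halted. */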
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
        assert(env->mcg_cap);

        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;

        kvm_cpu_synchronize_state(cs);

        if (env->exception_injected == EXCP08_DBLE) {
            /* this means triple fault */
            qemu_system_reset_request();
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;

        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }

    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }

    if (kvm_irqchip_in_kernel()) {
        return 0;
    }

    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cs->halted;
}

static int kvm_handle_halt(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 1;
        return EXCP_HLT;
    }

    return 0;
}

static int kvm_handle_tpr_access(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
                                                           : TPR_ACCESS_READ);
    return 1;
}

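/* gdbstub helpers: software breakpoints are implemented by patching an
 * int3 (0xcc) opcode over the first byte of the target instruction. */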
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
        return -EINVAL;
    }
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++) {
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1)) {
            return n;
        }
    }
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1)) {
                return -EINVAL;
            }
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4) {
        return -ENOBUFS;
    }
    if (find_hw_breakpoint(addr, len, type) >= 0) {
        return -EEXIST;
    }
    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0) {
        return -ENOENT;
    }
    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

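/* Scratch watchpoint record used to report hardware watchpoint hits back
 * to the generic debug core; filled in by the KVM_EXIT_DEBUG decoder
 * below. */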
static CPUWatchpoint hw_watchpoint;

static int kvm_handle_debug(X86CPU *cpu,
                            struct kvm_debug_exit_arch *arch_info)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int ret = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cs->singlestep_enabled) {
                ret = EXCP_DEBUG;
            }
        } else {
            for (n = 0; n < 4; n++) {
                if (arch_info->dr6 & (1 << n)) {
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        ret = EXCP_DEBUG;
                        break;
                    case 0x1:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        ret = EXCP_DEBUG;
                        cs->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
                }
            }
        }
    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
        ret = EXCP_DEBUG;
    }
    if (ret == 0) {
        cpu_synchronize_state(cs);
        assert(env->exception_injected == -1);

        /* pass to guest */
        env->exception_injected = arch_info->exception;
        env->has_error_code = 0;
    }

    return ret;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(cpu)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}

static bool host_supports_vmx(void)
{
    uint32_t ecx, unused;

    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
    return ecx & CPUID_EXT_VMX;
}

#define VMX_INVALID_GUEST_STATE 0x80000021

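/* Dispatcher for KVM exit reasons that the generic code does not handle
 * itself. Returns 0 to re-enter the guest, a positive EXCP_* code to
 * stop the vCPU, or a negative value on fatal errors. */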
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    X86CPU *cpu = X86_CPU(cs);
    uint64_t code;
    int ret;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_halt(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
        break;
    case KVM_EXIT_TPR_ACCESS:
        qemu_mutex_lock_iothread();
        ret = kvm_handle_tpr_access(cpu);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_FAIL_ENTRY:
        code = run->fail_entry.hardware_entry_failure_reason;
        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
                code);
        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be "
                    "running in big real mode\n"
                    "which is not supported on older Intel processors."
                    "\n\n");
        }
        ret = -1;
        break;
    case KVM_EXIT_EXCEPTION:
        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
                run->ex.exception, run->ex.error_code);
        ret = -1;
        break;
    case KVM_EXIT_DEBUG:
        DPRINTF("kvm_exit_debug\n");
        qemu_mutex_lock_iothread();
        ret = kvm_handle_debug(cpu, &run->debug.arch);
        qemu_mutex_unlock_iothread();
        break;
    case KVM_EXIT_HYPERV:
        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
        break;
    case KVM_EXIT_IOAPIC_EOI:
        ioapic_eoi_broadcast(run->eoi.vector);
        ret = 0;
        break;
    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    kvm_cpu_synchronize_state(cs);
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        /* If kernel can't do irq routing, interrupt source
         * override 0->2 cannot be set up as required by HPET.
         * So we have to disable it.
         */
        no_hpet = 1;
    }
    /* We know at this point that we're using the in-kernel
     * irqchip, so we can use irqfds, and on x86 we know
     * we can use msi via irqfd and GSI routing.
     */
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_routing_allowed = true;

    if (kvm_irqchip_is_split()) {
        int i;

        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
           MSI routes for signaling interrupts to the local apics. */
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
            struct MSIMessage msg = { 0x0, 0x0 };
            if (kvm_irqchip_add_msi_route(s, msg, NULL) < 0) {
                error_report("Could not enable split IRQ mode.");
                exit(1);
            }
        }
    }
}

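/* Enable a split irqchip (LAPIC in the kernel, IOAPIC/PIC in QEMU) when
 * the machine requests it; returns 1 if the irqchip was set up here, so
 * the generic code skips KVM_CREATE_IRQCHIP. */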
int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    int ret;
    if (machine_kernel_irqchip_split(ms)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
        if (ret) {
            error_report("Could not enable split irqchip mode: %s",
                         strerror(-ret));
            exit(1);
        } else {
            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
            kvm_split_irqchip = true;
            return 1;
        }
    } else {
        return 0;
    }
}

/* Classic KVM device assignment interface. Will remain x86 only. */
int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
                          uint32_t flags, uint32_t *dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .segnr = dev_addr->domain,
        .busnr = dev_addr->bus,
        .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
        .flags = flags,
    };
    int ret;

    dev_data.assigned_dev_id =
        (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;

    ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
    if (ret < 0) {
        return ret;
    }

    *dev_id = dev_data.assigned_dev_id;

    return 0;
}

int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
}

static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
                                   uint32_t irq_type, uint32_t guest_irq)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .guest_irq = guest_irq,
        .flags = irq_type,
    };

    if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
        return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
    } else {
        return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
    }
}

int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
                           uint32_t guest_irq)
{
    uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
        (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);

    return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
}

int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
{
    struct kvm_assigned_pci_dev dev_data = {
        .assigned_dev_id = dev_id,
        .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
}

static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
                                     uint32_t type)
{
    struct kvm_assigned_irq assigned_irq = {
        .assigned_dev_id = dev_id,
        .flags = type,
    };

    return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
}

int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
                                     (use_host_msi ? KVM_DEV_IRQ_HOST_MSI
                                                   : KVM_DEV_IRQ_HOST_INTX));
}

int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
                                   KVM_DEV_IRQ_GUEST_MSI, virq);
}

int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
                                     KVM_DEV_IRQ_HOST_MSI);
}

bool kvm_device_msix_supported(KVMState *s)
{
    /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
     * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
}

int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
                                 uint32_t nr_vectors)
{
    struct kvm_assigned_msix_nr msix_nr = {
        .assigned_dev_id = dev_id,
        .entry_nr = nr_vectors,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
}

int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
                               int virq)
{
    struct kvm_assigned_msix_entry msix_entry = {
        .assigned_dev_id = dev_id,
        .gsi = virq,
        .entry = vector,
    };

    return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
}

int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
{
    return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
                                   KVM_DEV_IRQ_GUEST_MSIX, 0);
}

int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
{
    return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
                                     KVM_DEV_IRQ_HOST_MSIX);
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}