/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "host-utils.h"

#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
#endif
//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12
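/* Note: these are the original kvmclock MSR numbers.  Later kernels also
 * export the same functionality at 0x4b564d00/0x4b564d01
 * (MSR_KVM_WALL_CLOCK_NEW/MSR_KVM_SYSTEM_TIME_NEW); this file predates
 * those, so only the old pair is handled here. */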
#ifdef KVM_CAP_EXT_CPUID

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        /* A completely filled buffer may still be truncated; treat it as
         * overflow so the caller retries with a larger one. */
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        }
        fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                strerror(-r));
        exit(1);
    }
    return cpuid;
}
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
        return -1U;
    }

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0,
                                                               R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}
#else

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    return -1U;
}

#endif
#ifdef CONFIG_KVM_PARA
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
#ifdef KVM_CAP_CLOCKSOURCE
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
#endif
#ifdef KVM_CAP_NOP_IO_DELAY
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
#endif
#ifdef KVM_CAP_PV_MMU
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#endif
    { -1, -1 }
};
static int get_para_features(CPUState *env)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(env->kvm_state, para_features[i].cap))
            features |= (1 << para_features[i].feature);
    }

    return features;
}
#endif
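/* Each KVM_CAP_* capability the kernel reports is advertised to the guest
 * as the corresponding KVM_FEATURE_* bit of the KVM_CPUID_FEATURES leaf
 * (EAX), further filtered below against what the user configuration
 * (env->cpuid_kvm_features) allows. */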
static int _kvm_arch_init_vcpu(CPUState *env);
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
#ifdef KVM_CPUID_SIGNATURE
    uint32_t signature[3];
#endif
    int r;

    r = _kvm_arch_init_vcpu(env);
    if (r < 0) {
        return r;
    }

    env->mp_state = KVM_MP_STATE_RUNNABLE;

    env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_ECX);

    cpuid_i = 0;

#ifdef CONFIG_KVM_PARA
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features & get_para_features(env);
#endif

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0)
                    break;
                if (i == 0xb && !(c->ecx & 0xff00))
                    break;
                if (i == 0xd && c->eax == 0)
                    break;

                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
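/* CPUID leaf 2 is "stateful" on older Intel CPUs: AL of the first invocation
 * says how many times CPUID.2 must be executed, and each execution can return
 * different cache/TLB descriptors.  The KVM_CPUID_FLAG_STATEFUL_FUNC and
 * KVM_CPUID_FLAG_STATE_READ_NEXT flags tell the kernel to replay the recorded
 * sequence of entries in order.  Leaves 4, 0xb and 0xd are instead indexed by
 * ECX, hence KVM_CPUID_FLAG_SIGNIFCANT_INDEX (the misspelling is the actual
 * kernel constant). */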
void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->nmi_injected = 0;
    env->nmi_pending = 0;
    /* Legal xcr0 for loading */
    env->xcr0 = 1;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;
    int ret;

    /* first time */
    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        has_msr_star = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return 0;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}
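/* KVM_GET_MSR_INDEX_LIST is a two-step protocol: the first call with
 * nmsrs = 0 fails with -E2BIG but fills in the required count, and the
 * second call with a properly sized buffer returns the actual indices. */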
static int kvm_init_identity_map_page(KVMState *s)
{
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    int ret;
    uint64_t addr = 0xfffbc000;

    if (!kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &addr);
    if (ret < 0) {
        fprintf(stderr, "kvm_set_identity_map_addr: %s\n", strerror(ret));
        return ret;
    }
#endif
    return 0;
}
int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it.  Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* this address is 3 pages before the bios, and the bios should present
     * it as unavailable memory.  FIXME, need to ensure the e820 map deals
     * with this properly.
     *
     * Tell fw_cfg to notify the BIOS to reserve the range.
     */
    if (e820_add_entry(0xfffbc000, 0x4000, E820_RESERVED) < 0) {
        perror("e820_add_entry() table is full");
        exit(1);
    }

    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
    if (ret < 0) {
        return ret;
    }

    return kvm_init_identity_map_page(s);
}
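/* The reserved 16KB window at 0xfffbc000-0xfffbffff thus holds the identity
 * map page (one page at 0xfffbc000) followed by the three-page vm86 TSS at
 * 0xfffbd000, tucked just below the BIOS image near the top of 4GB and
 * hidden from the guest via the e820 reservation above. */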
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}
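/* set_seg()/get_seg() translate between QEMU's packed descriptor-flags word
 * (laid out like the high word of an x86 segment descriptor, hence the
 * DESC_*_SHIFT/MASK constants) and KVM's exploded struct kvm_segment.
 * Multiplying a 0/1 field by a mask, as in (rhs->present * DESC_P_MASK),
 * is just a branch-free way of setting the bit when the field is set. */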
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}
static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
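/* fsw packs the FPU top-of-stack pointer into bits 11-13 of the status word,
 * and ftwx is the abridged tag word: one bit per register, 1 meaning valid
 * (QEMU's fptags uses the inverse convention, hence the '!'). */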
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
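/* These are uint32_t indices into kvm_xsave.region[], i.e. byte offset / 4
 * within the hardware XSAVE area: e.g. XSAVE_ST_SPACE (8) is byte offset 32,
 * where the x87 register stack starts, and XSAVE_YMMH_SPACE (144) is byte
 * offset 576, the AVX high-half state. */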
static int kvm_put_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    int i, r;
    struct kvm_xsave *xsave;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave())
        return kvm_put_fpu(env);

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    memset(xsave, 0, sizeof(struct kvm_xsave));
    cwd = swd = twd = fop = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i)
        twd |= (!env->fptags[i]) << i;
    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
    xsave->region[1] = (uint32_t)(fop << 16) + twd;
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    qemu_free(xsave);
    return r;
#else
    return kvm_put_fpu(env);
#endif
}
static int kvm_put_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs())
        return 0;

    xcrs.nr_xcrs = 1;
    xcrs.flags = 0;
    xcrs.xcrs[0].xcr = 0;
    xcrs.xcrs[0].value = env->xcr0;
    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
#else
    return 0;
#endif
}
static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
    if (env->interrupt_injected >= 0) {
        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
                (uint64_t)1 << (env->interrupt_injected % 64);
    }

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env->apic_state);
    sregs.apic_base = cpu_get_apic_base(env->apic_state);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}
static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    if (level == KVM_PUT_FULL_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
    }

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}
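/* The TSC and the kvmclock MSRs are only written back on a full state load
 * (KVM_PUT_FULL_STATE: cold start or migration); rewriting them on every
 * synchronization would warp the guest's notion of time. */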
static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}
static int kvm_get_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    struct kvm_xsave *xsave;
    int ret, i;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave())
        return kvm_get_fpu(env);

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        qemu_free(xsave);
        return ret;
    }

    cwd = (uint16_t)xsave->region[0];
    swd = (uint16_t)(xsave->region[0] >> 16);
    twd = (uint16_t)xsave->region[1];
    fop = (uint16_t)(xsave->region[1] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((twd >> i) & 1);
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    qemu_free(xsave);
    return 0;
#else
    return kvm_get_fpu(env);
#endif
}
static int kvm_get_xcrs(CPUState *env)
{
#ifdef KVM_CAP_XCRS
    int i, ret;
    struct kvm_xcrs xcrs;

    if (!kvm_has_xcrs())
        return 0;

    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
    if (ret < 0)
        return ret;

    for (i = 0; i < xcrs.nr_xcrs; i++)
        /* Only support xcr0 now */
        if (xcrs.xcrs[0].xcr == 0) {
            env->xcr0 = xcrs.xcrs[0].value;
            break;
        }
    return 0;
#else
    return 0;
#endif
}
static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int bit, i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->interrupt_injected = -1;
    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
        if (sregs.interrupt_bitmap[i]) {
            bit = ctz64(sregs.interrupt_bitmap[i]);
            env->interrupt_injected = i * 64 + bit;
            break;
        }
    }

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env->apic_state, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env->apic_state, sregs.cr8);

#define HFLAG_COPY_MASK ~( \
            HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
            HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
            HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
            HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}
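/* hflags is QEMU's cache of frequently tested processor state (CPL, CS/SS
 * operand size, long mode, ...).  KVM does not maintain it, so after every
 * KVM_GET_SREGS it has to be recomputed from the freshly fetched control
 * registers, EFER and segment descriptors; HFLAG_COPY_MASK preserves the
 * bits that are not derived here. */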
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
    msrs[n++].index = MSR_VM_HSAVE_PA;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;

    /* KVM_GET_MSRS returns the number of entries actually read */
    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        }
    }

    return 0;
}
static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}
static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0)
        return ret;

    env->mp_state = mp_state.mp_state;
    return 0;
}
static int kvm_put_vcpu_events(CPUState *env, int level)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    events.exception.injected = (env->exception_injected >= 0);
    events.exception.nr = env->exception_injected;
    events.exception.has_error_code = env->has_error_code;
    events.exception.error_code = env->error_code;

    events.interrupt.injected = (env->interrupt_injected >= 0);
    events.interrupt.nr = env->interrupt_injected;
    events.interrupt.soft = env->soft_interrupt;

    events.nmi.injected = env->nmi_injected;
    events.nmi.pending = env->nmi_pending;
    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);

    events.sipi_vector = env->sipi_vector;

    events.flags = 0;
    if (level >= KVM_PUT_RESET_STATE) {
        events.flags |=
            KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
    }

    return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events);
#else
    return 0;
#endif
}
static int kvm_get_vcpu_events(CPUState *env)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;
#endif

    return 0;
}
static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
     * reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    return ret;
}
static int kvm_put_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
#else
    return 0;
#endif
}
static int kvm_get_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;
#endif

    return 0;
}
int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_xsave(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_xcrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env, level);
    if (ret < 0)
        return ret;

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0)
            return ret;
    }

    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0)
        return ret;

    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_debugregs(env);
    if (ret < 0)
        return ret;

    return 0;
}
int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_self(env));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_xsave(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_xcrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_mp_state(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_vcpu_events(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_debugregs(env);
    if (ret < 0)
        return ret;

    return 0;
}
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;

            intr.irq = irq;
            /* FIXME: errors */
            DPRINTF("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    DPRINTF("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env->apic_state);

    return 0;
}
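/* This dance only matters with a userspace irqchip: interrupts can be
 * injected only at VM entry, so if the guest currently has IF clear we ask
 * KVM for an "interrupt window" exit and retry the injection on the next
 * kvm_arch_pre_run().  With an in-kernel irqchip, injection is handled
 * entirely inside the kernel. */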
int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);

    return 0;
}
int kvm_arch_process_irqchip_events(CPUState *env)
{
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(env);
        env->exception_index = EXCP_HALTED;
    }

    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(env);
    }

    return env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}
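/* A software breakpoint saves the original byte at bp->pc and replaces it
 * with int3 (0xcc); removal verifies the 0xcc is still in place before
 * restoring the saved instruction byte, so guest code that rewrote its own
 * text is not silently corrupted. */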
static int nb_hw_breakpoint;

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
        handle = 1;

    if (!handle) {
        cpu_synchronize_state(cpu_single_env);
        assert(cpu_single_env->exception_injected == -1);

        cpu_single_env->exception_injected = arch_info->exception;
        cpu_single_env->has_error_code = 0;
    }

    return handle;
}
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */
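/* DR7 layout used above: 0x0600 sets the GE flag plus the always-one
 * reserved bit 10; (2 << (n * 2)) sets the global-enable bit Gn for slot n;
 * the R/W condition (00 execute, 01 write, 11 read/write) sits at bits
 * 16 + 4n, and the length encoding (00=1, 01=2, 11=4, 10=8 bytes) at bits
 * 18 + 4n, matching type_code[] and len_code[]. */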
bool kvm_arch_stop_on_emulation_error(CPUState *env)
{
    return !(env->cr[0] & CR0_PE_MASK) ||
           ((env->segs[R_CS].selector & 3) != 3);
}
#include "qemu-kvm-x86.c"