/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"
#include "host-utils.h"
#include "hw/pc.h"
#include "hw/apic.h"
#include "ioport.h"

#ifdef CONFIG_KVM_PARA
#include <linux/kvm_para.h>
#endif
//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
#ifndef BUS_MCEERR_AO
#define BUS_MCEERR_AO 5
#endif
#ifdef KVM_CAP_EXT_CPUID

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}
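/* Note: KVM_GET_SUPPORTED_CPUID reports E2BIG when the supplied buffer is
 * too small, so callers probe with a growing "max" until the whole table
 * fits (see the doubling loop in kvm_arch_get_supported_cpuid() below). */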
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
        return -1U;
    }

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function &&
            cpuid->entries[i].index == index) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                switch (function) {
                case 1:
                    /* KVM before 2.6.30 misreports the following features */
                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
                    break;
                case 0x80000001:
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, 0,
                                                               R_EDX);
                    ret |= cpuid_1_edx & 0x183f7ff;
                    break;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}
#else

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      uint32_t index, int reg)
{
    return -1U;
}

#endif
#ifdef CONFIG_KVM_PARA
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
#ifdef KVM_CAP_CLOCKSOURCE
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
#endif
#ifdef KVM_CAP_NOP_IO_DELAY
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
#endif
#ifdef KVM_CAP_PV_MMU
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#endif
    { -1, -1 }
};

static int get_para_features(CPUState *env)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(env->kvm_state, para_features[i].cap))
            features |= (1 << para_features[i].feature);
    }

    return features;
}
#endif
#ifdef KVM_CAP_MCE
static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
                                     int *max_banks)
{
    int r;

    r = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_MCE);
    if (r > 0) {
        *max_banks = r;
        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
    }
    return -ENOSYS;
}

static int kvm_setup_mce(CPUState *env, uint64_t *mcg_cap)
{
    return kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, mcg_cap);
}

static int kvm_set_mce(CPUState *env, struct kvm_x86_mce *m)
{
    return kvm_vcpu_ioctl(env, KVM_X86_SET_MCE, m);
}
static int kvm_get_msr(CPUState *env, struct kvm_msr_entry *msrs, int n)
{
    struct kvm_msrs *kmsrs = qemu_malloc(sizeof *kmsrs + n * sizeof *msrs);
    int r;

    kmsrs->nmsrs = n;
    memcpy(kmsrs->entries, msrs, n * sizeof *msrs);
    r = kvm_vcpu_ioctl(env, KVM_GET_MSRS, kmsrs);
    memcpy(msrs, kmsrs->entries, n * sizeof *msrs);
    qemu_free(kmsrs);
    return r;
}
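/* Note: struct kvm_msrs ends in a flexible array of kvm_msr_entry, so the
 * header and the n entries above are allocated and handed to KVM_GET_MSRS
 * as one contiguous buffer. */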
/* FIXME: kill this and kvm_get_msr, use env->mcg_status instead */
static int kvm_mce_in_exception(CPUState *env)
{
    struct kvm_msr_entry msr_mcg_status = {
        .index = MSR_MCG_STATUS,
    };
    int r;

    r = kvm_get_msr(env, &msr_mcg_status, 1);
    if (r == -1 || r == 0) {
        return -1;
    }
    return !!(msr_mcg_status.data & MCG_STATUS_MCIP);
}
struct kvm_x86_mce_data
{
    CPUState *env;
    struct kvm_x86_mce *mce;
    int abort_on_error;
};

static void kvm_do_inject_x86_mce(void *_data)
{
    struct kvm_x86_mce_data *data = _data;
    int r;

    /* If there is an MCE exception being processed, ignore this SRAO MCE */
    if ((data->env->mcg_cap & MCG_SER_P) &&
        !(data->mce->status & MCI_STATUS_AR)) {
        r = kvm_mce_in_exception(data->env);
        if (r == -1) {
            fprintf(stderr, "Failed to get MCE status\n");
        } else if (r) {
            return;
        }
    }

    r = kvm_set_mce(data->env, data->mce);
    if (r < 0) {
        perror("kvm_set_mce FAILED");
        if (data->abort_on_error) {
            abort();
        }
    }
}
#endif /* KVM_CAP_MCE */
void kvm_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc,
                        int abort_on_error)
{
#ifdef KVM_CAP_MCE
    struct kvm_x86_mce mce = {
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
    };
    struct kvm_x86_mce_data data = {
        .env = cenv,
        .mce = &mce,
        .abort_on_error = abort_on_error,
    };

    if (!cenv->mcg_cap) {
        fprintf(stderr, "MCE support is not enabled!\n");
        return;
    }

    run_on_cpu(cenv, kvm_do_inject_x86_mce, &data);
#else
    if (abort_on_error) {
        abort();
    }
#endif
}
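/* Note: run_on_cpu() executes kvm_do_inject_x86_mce() on the target VCPU's
 * own thread; KVM expects vcpu ioctls such as KVM_X86_SET_MCE to be issued
 * from the thread that runs the vcpu. */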
int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;
    struct kvm_cpuid_entry2 *c;
#ifdef KVM_CPUID_SIGNATURE
    uint32_t signature[3];
#endif

    env->mp_state = KVM_MP_STATE_RUNNABLE;

    env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);

    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
    env->cpuid_ext_features |= i;

    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_EDX);
    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
                                                             0, R_ECX);
    env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
                                                            0, R_EDX);

    cpuid_i = 0;
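    /* Note: CPUID_EXT_HYPERVISOR is synthesized by QEMU itself rather than
     * reported by KVM, so it is saved in "i" above and OR'ed back in after
     * masking against the kernel-supported bits. */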
#ifdef CONFIG_KVM_PARA
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_SIGNATURE;
    c->eax = 0;
    c->ebx = signature[0];
    c->ecx = signature[1];
    c->edx = signature[2];

    c = &cpuid_data.entries[cpuid_i++];
    memset(c, 0, sizeof(*c));
    c->function = KVM_CPUID_FEATURES;
    c->eax = env->cpuid_kvm_features & get_para_features(env);
#endif
    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
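        /* Note: the low byte of leaf 2's EAX holds the number of times
         * CPUID.2 must be executed to read every cache/TLB descriptor,
         * hence the "times" loop above. */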
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0)
                    break;
                if (i == 0xb && !(c->ecx & 0xff00))
                    break;
                if (i == 0xd && c->eax == 0)
                    break;

                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;
#ifdef KVM_CAP_MCE
    if (((env->cpuid_version >> 8) & 0xF) >= 6
        && (env->cpuid_features & (CPUID_MCE | CPUID_MCA))
           == (CPUID_MCE | CPUID_MCA)
        && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
        uint64_t mcg_cap;
        int banks;

        if (kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks))
            perror("kvm_get_mce_cap_supported FAILED");
        else {
            if (banks > MCE_BANKS_DEF)
                banks = MCE_BANKS_DEF;
            mcg_cap &= MCE_CAP_DEF;
            mcg_cap |= banks;
            if (kvm_setup_mce(env, &mcg_cap))
                perror("kvm_setup_mce FAILED");
            else
                env->mcg_cap = mcg_cap;
        }
    }
#endif

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
void kvm_arch_reset_vcpu(CPUState *env)
{
    env->exception_injected = -1;
    env->interrupt_injected = -1;
    env->nmi_injected = 0;
    env->nmi_pending = 0;
    if (kvm_irqchip_in_kernel()) {
        env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
                                          KVM_MP_STATE_UNINITIALIZED;
    } else {
        env->mp_state = KVM_MP_STATE_RUNNABLE;
    }
}
int has_msr_star;
int has_msr_hsave_pa;

static void kvm_supported_msrs(CPUState *env)
{
    static int kvm_supported_msrs;
    int ret;

    /* first time */
    if (kvm_supported_msrs == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        kvm_supported_msrs = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0 && ret != -E2BIG) {
            return;
        }
        /* Old kernel modules had a bug and could write beyond the provided
           memory. Allocate at least a safe amount of 1K. */
        kvm_msr_list = qemu_mallocz(MAX(1024, sizeof(msr_list) +
                                              msr_list.nmsrs *
                                              sizeof(msr_list.indices[0])));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    continue;
                }
                if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
                    has_msr_hsave_pa = 1;
                    continue;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }
}

static int kvm_has_msr_hsave_pa(CPUState *env)
{
    kvm_supported_msrs(env);
    return has_msr_hsave_pa;
}
static int kvm_has_msr_star(CPUState *env)
{
    kvm_supported_msrs(env);
    return has_msr_star;
}
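/* Note: KVM_GET_MSR_INDEX_LIST is issued twice above: first with nmsrs = 0
 * so the kernel fails with E2BIG and fills in the required count, then
 * again with a buffer sized from that count. */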
static int kvm_init_identity_map_page(KVMState *s)
{
#ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
    int ret;
    uint64_t addr = 0xfffbc000;

    if (!kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &addr);
    if (ret < 0) {
        fprintf(stderr, "kvm_set_identity_map_addr: %s\n", strerror(ret));
        return ret;
    }
#endif
    return 0;
}
int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it.  Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* this address is 3 pages before the bios, and the bios should present
     * as unavailable memory.  FIXME, need to ensure the e820 map deals with
     * this properly.
     *
     * Tell fw_cfg to notify the BIOS to reserve the range.
     */
    if (e820_add_entry(0xfffbc000, 0x4000, E820_RESERVED) < 0) {
        perror("e820_add_entry() table is full");
        exit(1);
    }
    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
    if (ret < 0) {
        return ret;
    }

    return kvm_init_identity_map_page(s);
}
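/* Note: the reserved 0x4000 range spans four pages, covering the identity
 * map page at 0xfffbc000 plus the three-page TSS programmed at 0xfffbd000
 * above. */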
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}
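/* Note: the "* DESC_x_MASK" products above exploit the kvm_segment one-bit
 * fields being 0 or 1, turning each into a conditional OR of the
 * corresponding descriptor-flag mask. */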
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}
static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}
static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
#ifdef KVM_CAP_XSAVE
#define XSAVE_CWD_RIP     2
#define XSAVE_CWD_RDP     4
#define XSAVE_MXCSR       6
#define XSAVE_ST_SPACE    8
#define XSAVE_XMM_SPACE   40
#define XSAVE_XSTATE_BV   128
#define XSAVE_YMMH_SPACE  144
#endif
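/* Note: these constants index the uint32_t region[] array of struct
 * kvm_xsave, i.e. they are 32-bit-word offsets into the 4 KiB XSAVE area
 * (XSAVE_ST_SPACE 8 corresponds to byte offset 32, and so on). */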
static int kvm_put_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    int i, r;
    struct kvm_xsave *xsave;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave())
        return kvm_put_fpu(env);

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    memset(xsave, 0, sizeof(struct kvm_xsave));
    cwd = swd = twd = fop = 0;
    swd = env->fpus & ~(7 << 11);
    swd |= (env->fpstt & 7) << 11;
    cwd = env->fpuc;
    for (i = 0; i < 8; ++i)
        twd |= (!env->fptags[i]) << i;
    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
    xsave->region[1] = (uint32_t)(fop << 16) + twd;
    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
            sizeof env->fpregs);
    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
            sizeof env->xmm_regs);
    xsave->region[XSAVE_MXCSR] = env->mxcsr;
    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
            sizeof env->ymmh_regs);
    r = kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
    qemu_free(xsave);
    return r;
#else
    return kvm_put_fpu(env);
#endif
}
*env
)
720 struct kvm_xcrs xcrs
;
727 xcrs
.xcrs
[0].xcr
= 0;
728 xcrs
.xcrs
[0].value
= env
->xcr0
;
729 return kvm_vcpu_ioctl(env
, KVM_SET_XCRS
, &xcrs
);
735 static int kvm_put_sregs(CPUState
*env
)
737 struct kvm_sregs sregs
;
739 memset(sregs
.interrupt_bitmap
, 0, sizeof(sregs
.interrupt_bitmap
));
740 if (env
->interrupt_injected
>= 0) {
741 sregs
.interrupt_bitmap
[env
->interrupt_injected
/ 64] |=
742 (uint64_t)1 << (env
->interrupt_injected
% 64);
745 if ((env
->eflags
& VM_MASK
)) {
746 set_v8086_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
747 set_v8086_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
748 set_v8086_seg(&sregs
.es
, &env
->segs
[R_ES
]);
749 set_v8086_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
750 set_v8086_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
751 set_v8086_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
753 set_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
754 set_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
755 set_seg(&sregs
.es
, &env
->segs
[R_ES
]);
756 set_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
757 set_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
758 set_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
760 if (env
->cr
[0] & CR0_PE_MASK
) {
761 /* force ss cpl to cs cpl */
762 sregs
.ss
.selector
= (sregs
.ss
.selector
& ~3) |
763 (sregs
.cs
.selector
& 3);
764 sregs
.ss
.dpl
= sregs
.ss
.selector
& 3;
768 set_seg(&sregs
.tr
, &env
->tr
);
769 set_seg(&sregs
.ldt
, &env
->ldt
);
771 sregs
.idt
.limit
= env
->idt
.limit
;
772 sregs
.idt
.base
= env
->idt
.base
;
773 sregs
.gdt
.limit
= env
->gdt
.limit
;
774 sregs
.gdt
.base
= env
->gdt
.base
;
776 sregs
.cr0
= env
->cr
[0];
777 sregs
.cr2
= env
->cr
[2];
778 sregs
.cr3
= env
->cr
[3];
779 sregs
.cr4
= env
->cr
[4];
781 sregs
.cr8
= cpu_get_apic_tpr(env
->apic_state
);
782 sregs
.apic_base
= cpu_get_apic_base(env
->apic_state
);
784 sregs
.efer
= env
->efer
;
786 return kvm_vcpu_ioctl(env
, KVM_SET_SREGS
, &sregs
);
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}
static int kvm_put_msrs(CPUState *env, int level)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    if (kvm_has_msr_hsave_pa(env))
        kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif
    if (level == KVM_PUT_FULL_STATE) {
        kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
                          env->system_time_msr);
        kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
    }
#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        int i;

        if (level == KVM_PUT_RESET_STATE)
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
        else if (level == KVM_PUT_FULL_STATE) {
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
            kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
            for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
                kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i,
                                  env->mce_banks[i]);
        }
    }
#endif

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}
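/* Note: the low byte of MCG_CAP is the machine-check bank count and each
 * bank owns four MSRs (CTL, STATUS, ADDR, MISC), which is what the
 * (env->mcg_cap & 0xff) * 4 loops iterate over. */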
static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}
static int kvm_get_xsave(CPUState *env)
{
#ifdef KVM_CAP_XSAVE
    struct kvm_xsave *xsave;
    int ret, i;
    uint16_t cwd, swd, twd, fop;

    if (!kvm_has_xsave())
        return kvm_get_fpu(env);

    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
    if (ret < 0) {
        qemu_free(xsave);
        return ret;
    }

    cwd = (uint16_t)xsave->region[0];
    swd = (uint16_t)(xsave->region[0] >> 16);
    twd = (uint16_t)xsave->region[1];
    fop = (uint16_t)(xsave->region[1] >> 16);
    env->fpstt = (swd >> 11) & 7;
    env->fpus = swd;
    env->fpuc = cwd;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((twd >> i) & 1);
    env->mxcsr = xsave->region[XSAVE_MXCSR];
    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
            sizeof env->fpregs);
    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
            sizeof env->xmm_regs);
    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
            sizeof env->ymmh_regs);
    qemu_free(xsave);
    return 0;
#else
    return kvm_get_fpu(env);
#endif
}
*env
)
912 struct kvm_xcrs xcrs
;
917 ret
= kvm_vcpu_ioctl(env
, KVM_GET_XCRS
, &xcrs
);
921 for (i
= 0; i
< xcrs
.nr_xcrs
; i
++)
922 /* Only support xcr0 now */
923 if (xcrs
.xcrs
[0].xcr
== 0) {
924 env
->xcr0
= xcrs
.xcrs
[0].value
;
933 static int kvm_get_sregs(CPUState
*env
)
935 struct kvm_sregs sregs
;
939 ret
= kvm_vcpu_ioctl(env
, KVM_GET_SREGS
, &sregs
);
943 /* There can only be one pending IRQ set in the bitmap at a time, so try
944 to find it and save its number instead (-1 for none). */
945 env
->interrupt_injected
= -1;
946 for (i
= 0; i
< ARRAY_SIZE(sregs
.interrupt_bitmap
); i
++) {
947 if (sregs
.interrupt_bitmap
[i
]) {
948 bit
= ctz64(sregs
.interrupt_bitmap
[i
]);
949 env
->interrupt_injected
= i
* 64 + bit
;
954 get_seg(&env
->segs
[R_CS
], &sregs
.cs
);
955 get_seg(&env
->segs
[R_DS
], &sregs
.ds
);
956 get_seg(&env
->segs
[R_ES
], &sregs
.es
);
957 get_seg(&env
->segs
[R_FS
], &sregs
.fs
);
958 get_seg(&env
->segs
[R_GS
], &sregs
.gs
);
959 get_seg(&env
->segs
[R_SS
], &sregs
.ss
);
961 get_seg(&env
->tr
, &sregs
.tr
);
962 get_seg(&env
->ldt
, &sregs
.ldt
);
964 env
->idt
.limit
= sregs
.idt
.limit
;
965 env
->idt
.base
= sregs
.idt
.base
;
966 env
->gdt
.limit
= sregs
.gdt
.limit
;
967 env
->gdt
.base
= sregs
.gdt
.base
;
969 env
->cr
[0] = sregs
.cr0
;
970 env
->cr
[2] = sregs
.cr2
;
971 env
->cr
[3] = sregs
.cr3
;
972 env
->cr
[4] = sregs
.cr4
;
974 cpu_set_apic_base(env
->apic_state
, sregs
.apic_base
);
976 env
->efer
= sregs
.efer
;
977 //cpu_set_apic_tpr(env->apic_state, sregs.cr8);
979 #define HFLAG_COPY_MASK ~( \
980 HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
981 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
982 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
983 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
987 hflags
= (env
->segs
[R_CS
].flags
>> DESC_DPL_SHIFT
) & HF_CPL_MASK
;
988 hflags
|= (env
->cr
[0] & CR0_PE_MASK
) << (HF_PE_SHIFT
- CR0_PE_SHIFT
);
989 hflags
|= (env
->cr
[0] << (HF_MP_SHIFT
- CR0_MP_SHIFT
)) &
990 (HF_MP_MASK
| HF_EM_MASK
| HF_TS_MASK
);
991 hflags
|= (env
->eflags
& (HF_TF_MASK
| HF_VM_MASK
| HF_IOPL_MASK
));
992 hflags
|= (env
->cr
[4] & CR4_OSFXSR_MASK
) <<
993 (HF_OSFXSR_SHIFT
- CR4_OSFXSR_SHIFT
);
995 if (env
->efer
& MSR_EFER_LMA
) {
996 hflags
|= HF_LMA_MASK
;
999 if ((hflags
& HF_LMA_MASK
) && (env
->segs
[R_CS
].flags
& DESC_L_MASK
)) {
1000 hflags
|= HF_CS32_MASK
| HF_SS32_MASK
| HF_CS64_MASK
;
1002 hflags
|= (env
->segs
[R_CS
].flags
& DESC_B_MASK
) >>
1003 (DESC_B_SHIFT
- HF_CS32_SHIFT
);
1004 hflags
|= (env
->segs
[R_SS
].flags
& DESC_B_MASK
) >>
1005 (DESC_B_SHIFT
- HF_SS32_SHIFT
);
1006 if (!(env
->cr
[0] & CR0_PE_MASK
) ||
1007 (env
->eflags
& VM_MASK
) ||
1008 !(hflags
& HF_CS32_MASK
)) {
1009 hflags
|= HF_ADDSEG_MASK
;
1011 hflags
|= ((env
->segs
[R_DS
].base
|
1012 env
->segs
[R_ES
].base
|
1013 env
->segs
[R_SS
].base
) != 0) <<
1017 env
->hflags
= (env
->hflags
& HFLAG_COPY_MASK
) | hflags
;
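/* Note: hflags is QEMU's cached summary of CPU mode (CPL, 32/64-bit code
 * segment, address-size handling); HFLAG_COPY_MASK preserves only those
 * env->hflags bits that are not recomputed from the state just loaded. */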
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    if (kvm_has_msr_hsave_pa(env))
        msrs[n++].index = MSR_VM_HSAVE_PA;
    msrs[n++].index = MSR_IA32_TSC;
#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif
    msrs[n++].index = MSR_KVM_SYSTEM_TIME;
    msrs[n++].index = MSR_KVM_WALL_CLOCK;

#ifdef KVM_CAP_MCE
    if (env->mcg_cap) {
        msrs[n++].index = MSR_MCG_STATUS;
        msrs[n++].index = MSR_MCG_CTL;
        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++)
            msrs[n++].index = MSR_MC0_CTL + i;
    }
#endif

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;
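    /* Note: on success KVM_GET_MSRS returns the number of MSRs actually
     * read, so the loop below runs to "ret" rather than to "n". */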
    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        case MSR_VM_HSAVE_PA:
            env->vm_hsave = msrs[i].data;
            break;
        case MSR_KVM_SYSTEM_TIME:
            env->system_time_msr = msrs[i].data;
            break;
        case MSR_KVM_WALL_CLOCK:
            env->wall_clock_msr = msrs[i].data;
            break;
#ifdef KVM_CAP_MCE
        case MSR_MCG_STATUS:
            env->mcg_status = msrs[i].data;
            break;
        case MSR_MCG_CTL:
            env->mcg_ctl = msrs[i].data;
            break;
#endif
        default:
#ifdef KVM_CAP_MCE
            if (msrs[i].index >= MSR_MC0_CTL &&
                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
            }
#endif
            break;
        }
    }

    return 0;
}
static int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}

static int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0)
        return ret;

    env->mp_state = mp_state.mp_state;
    return 0;
}
*env
, int level
)
1148 #ifdef KVM_CAP_VCPU_EVENTS
1149 struct kvm_vcpu_events events
;
1151 if (!kvm_has_vcpu_events()) {
1155 events
.exception
.injected
= (env
->exception_injected
>= 0);
1156 events
.exception
.nr
= env
->exception_injected
;
1157 events
.exception
.has_error_code
= env
->has_error_code
;
1158 events
.exception
.error_code
= env
->error_code
;
1160 events
.interrupt
.injected
= (env
->interrupt_injected
>= 0);
1161 events
.interrupt
.nr
= env
->interrupt_injected
;
1162 events
.interrupt
.soft
= env
->soft_interrupt
;
1164 events
.nmi
.injected
= env
->nmi_injected
;
1165 events
.nmi
.pending
= env
->nmi_pending
;
1166 events
.nmi
.masked
= !!(env
->hflags2
& HF2_NMI_MASK
);
1168 events
.sipi_vector
= env
->sipi_vector
;
1171 if (level
>= KVM_PUT_RESET_STATE
) {
1173 KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
;
1176 return kvm_vcpu_ioctl(env
, KVM_SET_VCPU_EVENTS
, &events
);
static int kvm_get_vcpu_events(CPUState *env)
{
#ifdef KVM_CAP_VCPU_EVENTS
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_VCPU_EVENTS, &events);
    if (ret < 0) {
        return ret;
    }
    env->exception_injected =
        events.exception.injected ? events.exception.nr : -1;
    env->has_error_code = events.exception.has_error_code;
    env->error_code = events.exception.error_code;

    env->interrupt_injected =
        events.interrupt.injected ? events.interrupt.nr : -1;
    env->soft_interrupt = events.interrupt.soft;

    env->nmi_injected = events.nmi.injected;
    env->nmi_pending = events.nmi.pending;
    if (events.nmi.masked) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }

    env->sipi_vector = events.sipi_vector;
#endif

    return 0;
}
static int kvm_guest_debug_workarounds(CPUState *env)
{
    int ret = 0;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    unsigned long reinject_trap = 0;

    if (!kvm_has_vcpu_events()) {
        if (env->exception_injected == 1) {
            reinject_trap = KVM_GUESTDBG_INJECT_DB;
        } else if (env->exception_injected == 3) {
            reinject_trap = KVM_GUESTDBG_INJECT_BP;
        }
        env->exception_injected = -1;
    }

    /*
     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
     * by updating the debug state once again if single-stepping is on.
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject them via SET_GUEST_DEBUG.
     */
    if (reinject_trap ||
        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
        ret = kvm_update_guest_debug(env, reinject_trap);
    }
#endif /* KVM_CAP_SET_GUEST_DEBUG */
    return ret;
}
static int kvm_put_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    for (i = 0; i < 4; i++) {
        dbgregs.db[i] = env->dr[i];
    }
    dbgregs.dr6 = env->dr[6];
    dbgregs.dr7 = env->dr[7];
    dbgregs.flags = 0;

    return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs);
#else
    return 0;
#endif
}
static int kvm_get_debugregs(CPUState *env)
{
#ifdef KVM_CAP_DEBUGREGS
    struct kvm_debugregs dbgregs;
    int i, ret;

    if (!kvm_has_debugregs()) {
        return 0;
    }

    ret = kvm_vcpu_ioctl(env, KVM_GET_DEBUGREGS, &dbgregs);
    if (ret < 0) {
        return ret;
    }
    for (i = 0; i < 4; i++) {
        env->dr[i] = dbgregs.db[i];
    }
    env->dr[4] = env->dr[6] = dbgregs.dr6;
    env->dr[5] = env->dr[7] = dbgregs.dr7;
#endif

    return 0;
}
int kvm_arch_put_registers(CPUState *env, int level)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_self(env));

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_xsave(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_xcrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env, level);
    if (ret < 0)
        return ret;

    if (level >= KVM_PUT_RESET_STATE) {
        ret = kvm_put_mp_state(env);
        if (ret < 0)
            return ret;
    }

    ret = kvm_put_vcpu_events(env, level);
    if (ret < 0)
        return ret;

    /* must be last */
    ret = kvm_guest_debug_workarounds(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_debugregs(env);
    if (ret < 0)
        return ret;

    return 0;
}
int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    assert(cpu_is_stopped(env) || qemu_cpu_self(env));

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_xsave(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_xcrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_mp_state(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_vcpu_events(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_debugregs(env);
    if (ret < 0)
        return ret;

    return 0;
}
int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;

            intr.irq = irq;
            /* FIXME: errors */
            DPRINTF("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    DPRINTF("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env->apic_state);

    return 0;
}
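/* Note: request_interrupt_window makes KVM exit to userspace as soon as the
 * guest can accept an interrupt again, so the still-pending
 * CPU_INTERRUPT_HARD can be delivered by a later kvm_arch_pre_run(). */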
int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env->apic_state, run->cr8);
    cpu_set_apic_base(env->apic_state, run->apic_base);

    return 0;
}
int kvm_arch_process_irqchip_events(CPUState *env)
{
    if (env->interrupt_request & CPU_INTERRUPT_INIT) {
        kvm_cpu_synchronize_state(env);
        do_cpu_init(env);
        env->exception_index = EXCP_HALTED;
    }

    if (env->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(env);
        do_cpu_sipi(env);
    }

    return env->halted;
}
static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}
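/* Note: 0xcc is the one-byte INT3 opcode. A software breakpoint saves the
 * original byte and patches INT3 in; removal verifies the byte is still
 * 0xcc before restoring the saved instruction byte. */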
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}
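/* Note: x86 provides exactly four breakpoint address registers (DR0-DR3),
 * hence the hard limit of 4 hardware breakpoints above. */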
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
;
1578 int kvm_arch_debug(struct kvm_debug_exit_arch
*arch_info
)
1583 if (arch_info
->exception
== 1) {
1584 if (arch_info
->dr6
& (1 << 14)) {
1585 if (cpu_single_env
->singlestep_enabled
)
1588 for (n
= 0; n
< 4; n
++)
1589 if (arch_info
->dr6
& (1 << n
))
1590 switch ((arch_info
->dr7
>> (16 + n
*4)) & 0x3) {
1596 cpu_single_env
->watchpoint_hit
= &hw_watchpoint
;
1597 hw_watchpoint
.vaddr
= hw_breakpoint
[n
].addr
;
1598 hw_watchpoint
.flags
= BP_MEM_WRITE
;
1602 cpu_single_env
->watchpoint_hit
= &hw_watchpoint
;
1603 hw_watchpoint
.vaddr
= hw_breakpoint
[n
].addr
;
1604 hw_watchpoint
.flags
= BP_MEM_ACCESS
;
1608 } else if (kvm_find_sw_breakpoint(cpu_single_env
, arch_info
->pc
))
1612 cpu_synchronize_state(cpu_single_env
);
1613 assert(cpu_single_env
->exception_injected
== -1);
1615 cpu_single_env
->exception_injected
= arch_info
->exception
;
1616 cpu_single_env
->has_error_code
= 0;
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
    /* Legal xcr0 for loading */
}
#endif /* KVM_CAP_SET_GUEST_DEBUG */
*env
)
1654 return !(env
->cr
[0] & CR0_PE_MASK
) ||
1655 ((env
->segs
[R_CS
].selector
& 3) != 3);
static void hardware_memory_error(void)
{
    fprintf(stderr, "Hardware memory error!\n");
    exit(1);
}
#ifdef KVM_CAP_MCE
static void kvm_mce_broadcast_rest(CPUState *env)
{
    CPUState *cenv;
    int family, model, cpuver = env->cpuid_version;

    family = (cpuver >> 8) & 0xf;
    model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0xf);

    /* Broadcast MCA signal for processor version 06H_EH and above */
    if ((family == 6 && model >= 14) || family > 6) {
        for (cenv = first_cpu; cenv != NULL; cenv = cenv->next_cpu) {
            if (cenv == env) {
                continue;
            }
            kvm_inject_x86_mce(cenv, 1, MCI_STATUS_VAL | MCI_STATUS_UC,
                               MCG_STATUS_MCIP | MCG_STATUS_RIPV, 0, 0, 1);
        }
    }
}
#endif
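/* Note: family/model are decoded from the CPUID version word; per the Intel
 * SDM, machine-check broadcast to all logical processors applies from
 * family 6, model 14 (06_0EH) onward, which is the check above. */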
int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
#if defined(KVM_CAP_MCE)
    struct kvm_x86_mce mce = {
        .bank = 9,
    };
    void *vaddr;
    ram_addr_t ram_addr;
    target_phys_addr_t paddr;
    int r;

    if ((env->mcg_cap & MCG_SER_P) && addr
        && (code == BUS_MCEERR_AR
            || code == BUS_MCEERR_AO)) {
        if (code == BUS_MCEERR_AR) {
            /* Fake an Intel architectural Data Load SRAR UCR */
            mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                | MCI_STATUS_AR | 0x134;
            mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
            mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
        } else {
            /*
             * If there is an MCE exception being processed, ignore
             * this SRAO MCE
             */
            r = kvm_mce_in_exception(env);
            if (r == -1) {
                fprintf(stderr, "Failed to get MCE status\n");
            } else if (r) {
                return 0;
            }
            /* Fake an Intel architectural Memory scrubbing UCR */
            mce.status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
                | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
                | 0xc0;
            mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
            mce.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        }
        vaddr = (void *)addr;
        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(env->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!\n");
            /* Hope we are lucky for AO MCE */
            if (code == BUS_MCEERR_AO) {
                return 0;
            } else {
                hardware_memory_error();
            }
        }
        mce.addr = paddr;
        r = kvm_set_mce(env, &mce);
        if (r < 0) {
            fprintf(stderr, "kvm_set_mce: %s\n", strerror(errno));
            abort();
        }
        kvm_mce_broadcast_rest(env);
    } else
#endif
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}
int kvm_on_sigbus(int code, void *addr)
{
#if defined(KVM_CAP_MCE)
    if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
        uint64_t status;
        void *vaddr;
        ram_addr_t ram_addr;
        target_phys_addr_t paddr;

        /* Hope we are lucky for AO MCE */
        vaddr = addr;
        if (qemu_ram_addr_from_host(vaddr, &ram_addr) ||
            !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr,
                                               &paddr)) {
            fprintf(stderr, "Hardware memory error for memory used by "
                    "QEMU itself instead of guest system!: %p\n", addr);
            return 0;
        }
        status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN
            | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S
            | 0xc0;
        kvm_inject_x86_mce(first_cpu, 9, status,
                           MCG_STATUS_MCIP | MCG_STATUS_RIPV, paddr,
                           (MCM_ADDR_PHYS << 6) | 0xc, 1);
        kvm_mce_broadcast_rest(first_cpu);
    } else
#endif
    {
        if (code == BUS_MCEERR_AO) {
            return 0;
        } else if (code == BUS_MCEERR_AR) {
            hardware_memory_error();
        } else {
            return 1;
        }
    }
    return 0;
}
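/* Note: BUS_MCEERR_AO (action optional, e.g. a patrol-scrub hit) may be
 * dropped when it cannot be forwarded to the guest, while BUS_MCEERR_AR
 * (action required, the faulting access itself) must terminate QEMU if it
 * cannot be injected. */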