/*
 * QEMU KVM support
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"
#include "cpu.h"
#include "gdbstub.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

#ifdef KVM_CAP_EXT_CPUID

static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
    int r, size;

    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
    cpuid = (struct kvm_cpuid2 *)qemu_mallocz(size);
    cpuid->nent = max;
    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
    if (r == 0 && cpuid->nent >= max) {
        r = -E2BIG;
    }
    if (r < 0) {
        if (r == -E2BIG) {
            qemu_free(cpuid);
            return NULL;
        } else {
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
                    strerror(-r));
            exit(1);
        }
    }
    return cpuid;
}

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    struct kvm_cpuid2 *cpuid;
    int i, max;
    uint32_t ret = 0;
    uint32_t cpuid_1_edx;

    if (!kvm_check_extension(env->kvm_state, KVM_CAP_EXT_CPUID)) {
        return -1U;
    }

    max = 1;
    while ((cpuid = try_get_cpuid(env->kvm_state, max)) == NULL) {
        max *= 2;
    }

    for (i = 0; i < cpuid->nent; ++i) {
        if (cpuid->entries[i].function == function) {
            switch (reg) {
            case R_EAX:
                ret = cpuid->entries[i].eax;
                break;
            case R_EBX:
                ret = cpuid->entries[i].ebx;
                break;
            case R_ECX:
                ret = cpuid->entries[i].ecx;
                break;
            case R_EDX:
                ret = cpuid->entries[i].edx;
                if (function == 0x80000001) {
                    /* On Intel, kvm returns cpuid according to the Intel spec,
                     * so add missing bits according to the AMD spec:
                     */
                    cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, R_EDX);
                    ret |= cpuid_1_edx & 0xdfeff7ff;
                }
                break;
            }
        }
    }

    qemu_free(cpuid);

    return ret;
}

#else

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
{
    return -1U;
}

#endif

int kvm_arch_init_vcpu(CPUState *env)
{
    struct {
        struct kvm_cpuid2 cpuid;
        struct kvm_cpuid_entry2 entries[100];
    } __attribute__((packed)) cpuid_data;
    uint32_t limit, i, j, cpuid_i;
    uint32_t unused;

    cpuid_i = 0;

    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);

    for (i = 0; i <= limit; i++) {
        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];

        switch (i) {
        case 2: {
            /* Keep reading function 2 till all the input is received */
            int times;

            c->function = i;
            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
                       KVM_CPUID_FLAG_STATE_READ_NEXT;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            times = c->eax & 0xff;

            for (j = 1; j < times; ++j) {
                c = &cpuid_data.entries[cpuid_i++];
                c->function = i;
                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            }
            break;
        }
        case 4:
        case 0xb:
        case 0xd:
            for (j = 0; ; j++) {
                c->function = i;
                /* sic: the misspelling is the kernel header's */
                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                c->index = j;
                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);

                if (i == 4 && c->eax == 0)
                    break;
                if (i == 0xb && !(c->ecx & 0xff00))
                    break;
                if (i == 0xd && c->eax == 0)
                    break;

                c = &cpuid_data.entries[cpuid_i++];
            }
            break;
        default:
            c->function = i;
            c->flags = 0;
            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
            break;
        }
    }
    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);

    for (i = 0x80000000; i <= limit; i++) {
        struct kvm_cpuid_entry2 *c = &cpuid_data.entries[cpuid_i++];

        c->function = i;
        c->flags = 0;
        cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
    }

    cpuid_data.cpuid.nent = cpuid_i;

    return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}

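/* Why the special cases above: leaf 2 is stateful (the low byte of EAX on
 * the first read says how many times the leaf must be read to collect all
 * cache/TLB descriptors), and leaves 4, 0xb and 0xd return different data
 * for each ECX index, so each sub-leaf gets its own kvm_cpuid_entry2 until
 * the leaf's architectural termination condition is reached. */
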
static int kvm_has_msr_star(CPUState *env)
{
    static int has_msr_star;
    int ret;

    /* first time */
    if (has_msr_star == 0) {
        struct kvm_msr_list msr_list, *kvm_msr_list;

        has_msr_star = -1;

        /* Obtain MSR list from KVM.  These are the MSRs that we must
         * save/restore */
        msr_list.nmsrs = 0;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, &msr_list);
        if (ret < 0)
            return 0;

        kvm_msr_list = qemu_mallocz(sizeof(msr_list) +
                                    msr_list.nmsrs * sizeof(msr_list.indices[0]));

        kvm_msr_list->nmsrs = msr_list.nmsrs;
        ret = kvm_ioctl(env->kvm_state, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
        if (ret >= 0) {
            int i;

            for (i = 0; i < kvm_msr_list->nmsrs; i++) {
                if (kvm_msr_list->indices[i] == MSR_STAR) {
                    has_msr_star = 1;
                    break;
                }
            }
        }

        qemu_free(kvm_msr_list);
    }

    if (has_msr_star == 1)
        return 1;
    return 0;
}

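/* The double KVM_GET_MSR_INDEX_LIST call is intentional: the first call,
 * made with nmsrs == 0, fails but fills in the number of indices the
 * kernel wants to report, and the second call, on a buffer sized from that
 * count, fetches the actual index list. */
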
int kvm_arch_init(KVMState *s, int smp_cpus)
{
    int ret;

    /* create vm86 tss.  KVM uses vm86 mode to emulate 16-bit code
     * directly.  In order to use vm86 mode, a TSS is needed.  Since this
     * must be part of guest physical memory, we need to allocate it.  Older
     * versions of KVM just assumed that it would be at the end of physical
     * memory but that doesn't work with more than 4GB of memory.  We simply
     * refuse to work with those older versions of KVM. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
    if (ret <= 0) {
        fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n");
        return ret;
    }

    /* this address is 3 pages before the bios, and the bios should present
     * as unavailable memory.  FIXME, need to ensure the e820 map deals with
     * this properly. */
    return kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000);
}

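/* Address arithmetic for the magic constant: the TSS needs 3 pages, and
 * 0xfffbd000 + 3 * 0x1000 == 0xfffc0000, which is where the 256KB BIOS
 * image sits at the top of the 32-bit address space, so the TSS occupies
 * exactly the 3 pages directly below the BIOS. */
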
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}

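/* set_seg() and get_seg() are inverses: QEMU's SegmentCache.flags holds the
 * high word of the raw descriptor, while struct kvm_segment stores every
 * field unpacked.  For instance, an accessed, readable flat code segment
 * (type 0xb) travels to KVM via (flags >> DESC_TYPE_SHIFT) & 15 and comes
 * back through rhs->type << DESC_TYPE_SHIFT unchanged. */
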
static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
{
    if (set)
        *kvm_reg = *qemu_reg;
    else
        *qemu_reg = *kvm_reg;
}

static int kvm_getput_regs(CPUState *env, int set)
{
    struct kvm_regs regs;
    int ret = 0;

    if (!set) {
        ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
        if (ret < 0)
            return ret;
    }

    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
    kvm_getput_reg(&regs.r8, &env->regs[8], set);
    kvm_getput_reg(&regs.r9, &env->regs[9], set);
    kvm_getput_reg(&regs.r10, &env->regs[10], set);
    kvm_getput_reg(&regs.r11, &env->regs[11], set);
    kvm_getput_reg(&regs.r12, &env->regs[12], set);
    kvm_getput_reg(&regs.r13, &env->regs[13], set);
    kvm_getput_reg(&regs.r14, &env->regs[14], set);
    kvm_getput_reg(&regs.r15, &env->regs[15], set);
#endif

    kvm_getput_reg(&regs.rflags, &env->eflags, set);
    kvm_getput_reg(&regs.rip, &env->eip, set);

    if (set)
        ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);

    return ret;
}

static int kvm_put_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i;

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;

    return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}

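/* The ftwx handling above converts tag conventions: env->fptags[] uses 1
 * for "register empty", while kvm_fpu.ftwx is the abridged FXSAVE-format
 * tag word, where a 1 bit means "register valid" -- hence the negation
 * (kvm_get_fpu() below undoes it the same way). */
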
static int kvm_put_sregs(CPUState *env)
{
    struct kvm_sregs sregs;

    memcpy(sregs.interrupt_bitmap,
           env->interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;

    return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
}

static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
                              uint32_t index, uint64_t value)
{
    entry->index = index;
    entry->data = value;
}

static int kvm_put_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int n = 0;

    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star(env))
        kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
    kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
#ifdef TARGET_X86_64
    /* FIXME if lm capable */
    kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
    kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
    kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
#endif

    msr_data.info.nmsrs = n;

    return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
}

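/* MSR_STAR is written only when the kernel listed it in
 * KVM_GET_MSR_INDEX_LIST (see kvm_has_msr_star() above); the STAR MSR does
 * not exist on 32-bit Intel processors, and writing an MSR the kernel does
 * not know about could make KVM_SET_MSRS stop part-way through the batch. */
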
static int kvm_get_fpu(CPUState *env)
{
    struct kvm_fpu fpu;
    int i, ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
    if (ret < 0)
        return ret;

    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    return 0;
}

static int kvm_get_sregs(CPUState *env)
{
    struct kvm_sregs sregs;
    uint32_t hflags;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
    if (ret < 0)
        return ret;

    memcpy(env->interrupt_bitmap,
           sregs.interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);

#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    return 0;
}

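/* env->hflags caches flags derived from segment and control registers
 * (CPL, default operand size, long mode, address-size add-seg, ...).  TCG
 * updates it incrementally on every segment load, but after KVM has run
 * the guest the segment state has changed behind QEMU's back, so the
 * derived bits are recomputed wholesale before anything else looks at
 * them. */
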
static int kvm_get_msrs(CPUState *env)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[100];
    } msr_data;
    struct kvm_msr_entry *msrs = msr_data.entries;
    int ret, i, n;

    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star(env))
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;

#ifdef TARGET_X86_64
    /* FIXME lm_capable_kernel */
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;
#endif

    msr_data.info.nmsrs = n;
    ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
    if (ret < 0)
        return ret;

    for (i = 0; i < ret; i++) {
        switch (msrs[i].index) {
        case MSR_IA32_SYSENTER_CS:
            env->sysenter_cs = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_ESP:
            env->sysenter_esp = msrs[i].data;
            break;
        case MSR_IA32_SYSENTER_EIP:
            env->sysenter_eip = msrs[i].data;
            break;
        case MSR_STAR:
            env->star = msrs[i].data;
            break;
#ifdef TARGET_X86_64
        case MSR_CSTAR:
            env->cstar = msrs[i].data;
            break;
        case MSR_KERNELGSBASE:
            env->kernelgsbase = msrs[i].data;
            break;
        case MSR_FMASK:
            env->fmask = msrs[i].data;
            break;
        case MSR_LSTAR:
            env->lstar = msrs[i].data;
            break;
#endif
        case MSR_IA32_TSC:
            env->tsc = msrs[i].data;
            break;
        }
    }

    return 0;
}

int kvm_arch_put_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 1);
    if (ret < 0)
        return ret;

    ret = kvm_put_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_put_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_get_registers(CPUState *env)
{
    int ret;

    ret = kvm_getput_regs(env, 0);
    if (ret < 0)
        return ret;

    ret = kvm_get_fpu(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_sregs(env);
    if (ret < 0)
        return ret;

    ret = kvm_get_msrs(env);
    if (ret < 0)
        return ret;

    return 0;
}

int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
{
    /* Try to inject an interrupt if the guest can accept it */
    if (run->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        int irq;

        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            struct kvm_interrupt intr;

            intr.irq = irq;
            /* FIXME: errors */
            dprintf("injected interrupt %d\n", irq);
            kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
        }
    }

    /* If we have an interrupt but the guest is not ready to receive an
     * interrupt, request an interrupt window exit.  This will
     * cause a return to userspace as soon as the guest is ready to
     * receive interrupts. */
    if ((env->interrupt_request & CPU_INTERRUPT_HARD))
        run->request_interrupt_window = 1;
    else
        run->request_interrupt_window = 0;

    dprintf("setting tpr\n");
    run->cr8 = cpu_get_apic_tpr(env);

    return 0;
}

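/* The interrupt-window protocol above: a PIC vector can only be injected
 * while the guest is interruptible (ready_for_interrupt_injection and IF
 * set).  When it is not, request_interrupt_window makes KVM_RUN return to
 * userspace as soon as the guest becomes interruptible, this function runs
 * again, and the pending vector is injected then. */
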
int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
{
    if (run->if_flag)
        env->eflags |= IF_MASK;
    else
        env->eflags &= ~IF_MASK;

    cpu_set_apic_tpr(env, run->cr8);
    cpu_set_apic_base(env, run->apic_base);

    return 0;
}

static int kvm_handle_halt(CPUState *env)
{
    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
        return 0;
    }

    return 1;
}

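/* Returning 0 here tells the generic vcpu loop to stop running the guest:
 * env->halted stays set and EXCP_HLT is reported, so QEMU idles until an
 * interrupt source clears the condition.  Returning 1 means an interrupt
 * is already pending and the guest can simply be resumed. */
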
int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_HLT:
        dprintf("handle_hlt\n");
        ret = kvm_handle_halt(env);
        break;
    }

    return ret;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG

int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    static const uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}

static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            /* watchpoints must be naturally aligned to their length */
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}

static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
        handle = 1;

    if (!handle)
        kvm_update_guest_debug(cpu_single_env,
                               (arch_info->exception == 1) ?
                               KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);

    return handle;
}

void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (kvm_sw_breakpoints_active(env))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}

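/* Worked example of the DR7 encoding: a 4-byte write watchpoint in slot 0
 * contributes the slot-0 global-enable bit (2 << 0), R/W0 = 0x1 (write)
 * at bit 16 and LEN0 = 0x3 (4 bytes) at bit 18, so starting from the base
 * value 0x0600 the register ends up as
 * 0x0600 | 0x2 | (0x1 << 16) | (0x3 << 18) = 0xd0602. */
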
#endif /* KVM_CAP_SET_GUEST_DEBUG */