Do not use env->halted to decide where halted state should be handled
[qemu-kvm/fedora.git] / qemu-kvm-x86.c
/*
 * qemu/kvm integration, x86 specific code
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
#include "config.h"
#include "config-host.h"

#include <string.h>
#include "hw/hw.h"
#include "gdbstub.h"
#include <sys/io.h>

#include "qemu-kvm.h"
#include "libkvm-all.h"
#include <pthread.h>
#include <sys/utsname.h>
#include <linux/kvm_para.h>

#include "kvm.h"

#define MSR_IA32_TSC 0x10

static struct kvm_msr_list *kvm_msr_list;
extern unsigned int kvm_shadow_memory;
static int kvm_has_msr_star;
static int kvm_has_vm_hsave_pa;

static int lm_capable_kernel;
int kvm_qemu_create_memory_alias(uint64_t phys_start,
                                 uint64_t len,
                                 uint64_t target_phys)
{
    return kvm_create_memory_alias(kvm_context, phys_start, len, target_phys);
}

int kvm_qemu_destroy_memory_alias(uint64_t phys_start)
{
    return kvm_destroy_memory_alias(kvm_context, phys_start);
}
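/*
 * Per-VM x86 setup: detect whether the host kernel is 64-bit, apply any
 * shadow-page override, and scan the host's MSR list so the register
 * save/load code knows whether MSR_STAR and MSR_VM_HSAVE_PA are available.
 */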
int kvm_arch_qemu_create_context(void)
{
    int i;
    struct utsname utsname;

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    if (kvm_shadow_memory)
        kvm_set_shadow_pages(kvm_context, kvm_shadow_memory);

    kvm_msr_list = kvm_get_msr_list(kvm_context);
    if (!kvm_msr_list)
        return -1;
    for (i = 0; i < kvm_msr_list->nmsrs; ++i) {
        if (kvm_msr_list->indices[i] == MSR_STAR)
            kvm_has_msr_star = 1;
        if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA)
            kvm_has_vm_hsave_pa = 1;
    }

    return 0;
}
static void set_msr_entry(struct kvm_msr_entry *entry, uint32_t index,
                          uint64_t data)
{
    entry->index = index;
    entry->data = data;
}

/* returns 0 on success, non-0 on failure */
static int get_msr_entry(struct kvm_msr_entry *entry, CPUState *env)
{
    switch (entry->index) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = entry->data;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = entry->data;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = entry->data;
        break;
    case MSR_STAR:
        env->star = entry->data;
        break;
#ifdef TARGET_X86_64
    case MSR_CSTAR:
        env->cstar = entry->data;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = entry->data;
        break;
    case MSR_FMASK:
        env->fmask = entry->data;
        break;
    case MSR_LSTAR:
        env->lstar = entry->data;
        break;
#endif
    case MSR_IA32_TSC:
        env->tsc = entry->data;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = entry->data;
        break;
    default:
        printf("Warning unknown msr index 0x%x\n", entry->index);
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define MSR_COUNT 9
#else
#define MSR_COUNT 5
#endif
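/*
 * Segment conversion helpers between QEMU's SegmentCache (attributes packed
 * in DESC_* flag format) and the kernel's struct kvm_segment.
 * set_v8086_seg() forces the fixed attributes virtual-8086 segments require.
 */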
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}
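/*
 * Push the full QEMU CPUState into the kernel: general-purpose registers,
 * FPU/SSE state, segment and control registers, and the MSRs probed at
 * context-creation time.
 */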
void kvm_arch_load_regs(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[MSR_COUNT];
    int rc, n, i;

    regs.rax = env->regs[R_EAX];
    regs.rbx = env->regs[R_EBX];
    regs.rcx = env->regs[R_ECX];
    regs.rdx = env->regs[R_EDX];
    regs.rsi = env->regs[R_ESI];
    regs.rdi = env->regs[R_EDI];
    regs.rsp = env->regs[R_ESP];
    regs.rbp = env->regs[R_EBP];
#ifdef TARGET_X86_64
    regs.r8 = env->regs[8];
    regs.r9 = env->regs[9];
    regs.r10 = env->regs[10];
    regs.r11 = env->regs[11];
    regs.r12 = env->regs[12];
    regs.r13 = env->regs[13];
    regs.r14 = env->regs[14];
    regs.r15 = env->regs[15];
#endif

    regs.rflags = env->eflags;
    regs.rip = env->eip;

    kvm_set_regs(env->kvm_cpu_state.vcpu_ctx, &regs);

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;
    kvm_set_fpu(env->kvm_cpu_state.vcpu_ctx, &fpu);

    memcpy(sregs.interrupt_bitmap, env->interrupt_bitmap, sizeof(sregs.interrupt_bitmap));

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;

    kvm_set_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);

    /* msrs */
    n = 0;
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star)
        set_msr_entry(&msrs[n++], MSR_STAR, env->star);
    if (kvm_has_vm_hsave_pa)
        set_msr_entry(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        set_msr_entry(&msrs[n++], MSR_CSTAR, env->cstar);
        set_msr_entry(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        set_msr_entry(&msrs[n++], MSR_FMASK, env->fmask);
        set_msr_entry(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif

    rc = kvm_set_msrs(env->kvm_cpu_state.vcpu_ctx, msrs, n);
    if (rc == -1)
        perror("kvm_set_msrs FAILED");
}
void kvm_load_tsc(CPUState *env)
{
    int rc;
    struct kvm_msr_entry msr;

    set_msr_entry(&msr, MSR_IA32_TSC, env->tsc);

    rc = kvm_set_msrs(env->kvm_cpu_state.vcpu_ctx, &msr, 1);
    if (rc == -1)
        perror("kvm_set_tsc FAILED.\n");
}
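/*
 * The vcpu MP state is only synchronized when the kernel advertises
 * KVM_CAP_MP_STATE; env->mp_state == -1 marks "not available on this host".
 */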
void kvm_save_mpstate(CPUState *env)
{
#ifdef KVM_CAP_MP_STATE
    int r;
    struct kvm_mp_state mp_state;

    r = kvm_get_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
    if (r < 0)
        env->mp_state = -1;
    else
        env->mp_state = mp_state.mp_state;
#endif
}

void kvm_load_mpstate(CPUState *env)
{
#ifdef KVM_CAP_MP_STATE
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    /*
     * -1 indicates that the host did not support GET_MP_STATE ioctl,
     * so don't touch it.
     */
    if (env->mp_state != -1)
        kvm_set_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
#endif
}
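/*
 * Pull the vcpu state back out of the kernel and rebuild the derived
 * env->hflags bits (CPL, CS/SS size, long mode, ADDSEG) from the fetched
 * segment and control registers.
 */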
void kvm_arch_save_regs(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[MSR_COUNT];
    uint32_t hflags;
    uint32_t i, n, rc;

    kvm_get_regs(env->kvm_cpu_state.vcpu_ctx, &regs);

    env->regs[R_EAX] = regs.rax;
    env->regs[R_EBX] = regs.rbx;
    env->regs[R_ECX] = regs.rcx;
    env->regs[R_EDX] = regs.rdx;
    env->regs[R_ESI] = regs.rsi;
    env->regs[R_EDI] = regs.rdi;
    env->regs[R_ESP] = regs.rsp;
    env->regs[R_EBP] = regs.rbp;
#ifdef TARGET_X86_64
    env->regs[8] = regs.r8;
    env->regs[9] = regs.r9;
    env->regs[10] = regs.r10;
    env->regs[11] = regs.r11;
    env->regs[12] = regs.r12;
    env->regs[13] = regs.r13;
    env->regs[14] = regs.r14;
    env->regs[15] = regs.r15;
#endif

    env->eflags = regs.rflags;
    env->eip = regs.rip;

    kvm_get_fpu(env->kvm_cpu_state.vcpu_ctx, &fpu);
    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);

    memcpy(env->interrupt_bitmap, sregs.interrupt_bitmap, sizeof(env->interrupt_bitmap));

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);

#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    /* msrs */
    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star)
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
    if (kvm_has_vm_hsave_pa)
        msrs[n++].index = MSR_VM_HSAVE_PA;
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    rc = kvm_get_msrs(env->kvm_cpu_state.vcpu_ctx, msrs, n);
    if (rc == -1) {
        perror("kvm_get_msrs FAILED");
    } else {
        n = rc; /* actual number of MSRs */
        for (i = 0; i < n; i++) {
            if (get_msr_entry(&msrs[i], env))
                return;
        }
    }
}
static void do_cpuid_ent(struct kvm_cpuid_entry2 *e, uint32_t function,
                         uint32_t count, CPUState *env)
{
    env->regs[R_EAX] = function;
    env->regs[R_ECX] = count;
    qemu_kvm_cpuid_on_env(env);
    e->function = function;
    e->flags = 0;
    e->index = 0;
    e->eax = env->regs[R_EAX];
    e->ebx = env->regs[R_EBX];
    e->ecx = env->regs[R_ECX];
    e->edx = env->regs[R_EDX];
}

struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
#ifdef KVM_CAP_CLOCKSOURCE
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
#endif
#ifdef KVM_CAP_NOP_IO_DELAY
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
#endif
#ifdef KVM_CAP_PV_MMU
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#endif
#ifdef KVM_CAP_CR3_CACHE
    { KVM_CAP_CR3_CACHE, KVM_FEATURE_CR3_CACHE },
#endif
    { -1, -1 }
};

static int get_para_features(kvm_context_t kvm_context)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(kvm_context, para_features[i].cap))
            features |= (1 << para_features[i].feature);
    }

    return features;
}

static void kvm_trim_features(uint32_t *features, uint32_t supported)
{
    int i;
    uint32_t mask;

    for (i = 0; i < 32; ++i) {
        mask = 1U << i;
        if ((*features & mask) && !(supported & mask)) {
            *features &= ~mask;
        }
    }
}
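/*
 * Build the guest CPUID table: first the KVM paravirt leaves (signature and
 * feature bits from get_para_features()), then every standard and extended
 * leaf as reported by qemu_kvm_cpuid_on_env(), with leaves 4, 0xb and 0xd
 * expanded per index.  Finally trim the cpuid_*features words down to what
 * the host actually supports.
 */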
int kvm_arch_qemu_init_env(CPUState *cenv)
{
    struct kvm_cpuid_entry2 cpuid_ent[100];
#ifdef KVM_CPUID_SIGNATURE
    struct kvm_cpuid_entry2 *pv_ent;
    uint32_t signature[3];
#endif
    int cpuid_nent = 0;
    CPUState copy;
    uint32_t i, j, limit;

    qemu_kvm_load_lapic(cenv);

    copy = *cenv;

#ifdef KVM_CPUID_SIGNATURE
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    pv_ent = &cpuid_ent[cpuid_nent++];
    memset(pv_ent, 0, sizeof(*pv_ent));
    pv_ent->function = KVM_CPUID_SIGNATURE;
    pv_ent->eax = 0;
    pv_ent->ebx = signature[0];
    pv_ent->ecx = signature[1];
    pv_ent->edx = signature[2];

    pv_ent = &cpuid_ent[cpuid_nent++];
    memset(pv_ent, 0, sizeof(*pv_ent));
    pv_ent->function = KVM_CPUID_FEATURES;
    pv_ent->eax = get_para_features(kvm_context);
#endif

    copy.regs[R_EAX] = 0;
    qemu_kvm_cpuid_on_env(&copy);
    limit = copy.regs[R_EAX];

    for (i = 0; i <= limit; ++i) {
        if (i == 4 || i == 0xb || i == 0xd) {
            for (j = 0; ; ++j) {
                do_cpuid_ent(&cpuid_ent[cpuid_nent], i, j, &copy);

                cpuid_ent[cpuid_nent].flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpuid_ent[cpuid_nent].index = j;

                cpuid_nent++;

                if (i == 4 && copy.regs[R_EAX] == 0)
                    break;
                if (i == 0xb && !(copy.regs[R_ECX] & 0xff00))
                    break;
                if (i == 0xd && copy.regs[R_EAX] == 0)
                    break;
            }
        } else
            do_cpuid_ent(&cpuid_ent[cpuid_nent++], i, 0, &copy);
    }

    copy.regs[R_EAX] = 0x80000000;
    qemu_kvm_cpuid_on_env(&copy);
    limit = copy.regs[R_EAX];

    for (i = 0x80000000; i <= limit; ++i)
        do_cpuid_ent(&cpuid_ent[cpuid_nent++], i, 0, &copy);

    kvm_setup_cpuid2(cenv->kvm_cpu_state.vcpu_ctx, cpuid_nent, cpuid_ent);

    kvm_trim_features(&cenv->cpuid_features,
                      kvm_arch_get_supported_cpuid(cenv, 1, R_EDX));
    kvm_trim_features(&cenv->cpuid_ext_features,
                      kvm_arch_get_supported_cpuid(cenv, 1, R_ECX));
    kvm_trim_features(&cenv->cpuid_ext2_features,
                      kvm_arch_get_supported_cpuid(cenv, 0x80000001, R_EDX));
    kvm_trim_features(&cenv->cpuid_ext3_features,
                      kvm_arch_get_supported_cpuid(cenv, 0x80000001, R_ECX));

    return 0;
}
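/*
 * Called when the guest executes HLT and the exit is handled in userspace:
 * unless an unmasked interrupt or an NMI is already pending, mark the vcpu
 * halted and let the main loop wait for work.
 */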
int kvm_arch_halt(void *opaque, kvm_vcpu_context_t vcpu)
{
    CPUState *env = cpu_single_env;

    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
        env->exception_index = EXCP_HLT;
    }
    return 1;
}
void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
{
    if (!kvm_irqchip_in_kernel(kvm_context))
        kvm_set_cr8(env->kvm_cpu_state.vcpu_ctx, cpu_get_apic_tpr(env));
}

void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
{
    cpu_single_env = env;

    env->eflags = kvm_get_interrupt_flag(env->kvm_cpu_state.vcpu_ctx)
        ? env->eflags | IF_MASK : env->eflags & ~IF_MASK;

    cpu_set_apic_tpr(env, kvm_get_cr8(env->kvm_cpu_state.vcpu_ctx));
    cpu_set_apic_base(env, kvm_get_apic_base(env->kvm_cpu_state.vcpu_ctx));
}

int kvm_arch_has_work(CPUState *env)
{
    if (env->exit_request ||
        ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (env->interrupt_request & CPU_INTERRUPT_NMI))
        return 1;
    return 0;
}
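/*
 * Userspace-irqchip interrupt injection: if the vcpu can accept an
 * interrupt and the PIC has one pending, inject it now.  The nonzero
 * return value tells the caller an interrupt is still pending.
 */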
int kvm_arch_try_push_interrupts(void *opaque)
{
    CPUState *env = cpu_single_env;
    int r, irq;

    if (kvm_is_ready_for_interrupt_injection(env->kvm_cpu_state.vcpu_ctx) &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            r = kvm_inject_irq(env->kvm_cpu_state.vcpu_ctx, irq);
            if (r < 0)
                printf("cpu %d fail inject %x\n", env->cpu_index, irq);
        }
    }

    return (env->interrupt_request & CPU_INTERRUPT_HARD) != 0;
}
#ifdef KVM_CAP_USER_NMI
void kvm_arch_push_nmi(void *opaque)
{
    CPUState *env = cpu_single_env;
    int r;

    if (likely(!(env->interrupt_request & CPU_INTERRUPT_NMI)))
        return;

    env->interrupt_request &= ~CPU_INTERRUPT_NMI;
    r = kvm_inject_nmi(env->kvm_cpu_state.vcpu_ctx);
    if (r < 0)
        printf("cpu %d fail inject NMI\n", env->cpu_index);
}
#endif /* KVM_CAP_USER_NMI */

void kvm_arch_update_regs_for_sipi(CPUState *env)
{
    SegmentCache cs = env->segs[R_CS];

    kvm_arch_save_regs(env);
    env->segs[R_CS] = cs;
    env->eip = 0;
    kvm_arch_load_regs(env);
}

int handle_tpr_access(void *opaque, kvm_vcpu_context_t vcpu,
                      uint64_t rip, int is_write)
{
    kvm_tpr_access_report(cpu_single_env, rip, is_write);
    return 0;
}
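/*
 * Reset a vcpu: reload its register state, and for non-boot CPUs either
 * reset the in-kernel MP state or, with the userspace irqchip, park the
 * vcpu halted (EXCP_HLT) until it is started again.
 */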
void kvm_arch_cpu_reset(CPUState *env)
{
    kvm_arch_load_regs(env);
    if (env->cpu_index != 0) {
        if (kvm_irqchip_in_kernel(kvm_context)) {
#ifdef KVM_CAP_MP_STATE
            kvm_reset_mpstate(env->kvm_cpu_state.vcpu_ctx);
#endif
        } else {
            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            env->halted = 1;
            env->exception_index = EXCP_HLT;
        }
    }
}
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, &int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;
static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
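/*
 * Debug-exit handling: decode DR6/DR7 to tell single-step, hardware
 * breakpoints and watchpoints apart; exceptions we did not plant ourselves
 * are re-injected into the guest as #DB or #BP.
 */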
static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(arch_info->pc))
        handle = 1;

    if (!handle)
        kvm_update_guest_debug(cpu_single_env,
                               (arch_info->exception == 1) ?
                               KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);

    return handle;
}
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (!TAILQ_EMPTY(&kvm_sw_breakpoints))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif
void kvm_arch_do_ioperm(void *_data)
{
    struct ioperm_data *data = _data;
    ioperm(data->start_port, data->num, data->turn_on);
}
/*
 * Setup x86 specific IRQ routing
 */
int kvm_arch_init_irq_routing(void)
{
    int i, r;

    if (kvm_irqchip && kvm_has_gsi_routing(kvm_context)) {
        kvm_clear_gsi_routes(kvm_context);
        for (i = 0; i < 8; ++i) {
            if (i == 2)
                continue;
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_PIC_MASTER, i);
            if (r < 0)
                return r;
        }
        for (i = 8; i < 16; ++i) {
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_PIC_SLAVE, i - 8);
            if (r < 0)
                return r;
        }
        for (i = 0; i < 24; ++i) {
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_IOAPIC, i);
            if (r < 0)
                return r;
        }
        kvm_commit_irq_routes(kvm_context);
    }
    return 0;
}

uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      int reg)
{
    return kvm_get_supported_cpuid(kvm_context, function, reg);
}