Drop unnecessary setting of env->exception_index
[qemu-kvm/amd-iommu.git] / qemu-kvm-x86.c
blob 686538589ce31865993ef262668beadf929b5245
/*
 * qemu/kvm integration, x86 specific code
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */

#include "config.h"
#include "config-host.h"

#include <string.h>
#include "hw/hw.h"
#include "gdbstub.h"
#include <sys/io.h>

#include "qemu-kvm.h"
#include "libkvm-all.h"
#include <pthread.h>
#include <sys/utsname.h>
#include <linux/kvm_para.h>

#include "kvm.h"

#define MSR_IA32_TSC 0x10

static struct kvm_msr_list *kvm_msr_list;
extern unsigned int kvm_shadow_memory;
static int kvm_has_msr_star;
static int kvm_has_vm_hsave_pa;

static int lm_capable_kernel;

int kvm_qemu_create_memory_alias(uint64_t phys_start,
                                 uint64_t len,
                                 uint64_t target_phys)
{
    return kvm_create_memory_alias(kvm_context, phys_start, len, target_phys);
}

int kvm_qemu_destroy_memory_alias(uint64_t phys_start)
{
    return kvm_destroy_memory_alias(kvm_context, phys_start);
}
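/* Probe the host at VM creation time: record whether the kernel is 64-bit
 * capable, apply the optional shadow-page override, and cache which MSRs
 * (STAR, VM_HSAVE_PA) the kernel exposes so the register sync code can
 * skip the ones it does not support. */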
int kvm_arch_qemu_create_context(void)
{
    int i;
    struct utsname utsname;

    uname(&utsname);
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;

    if (kvm_shadow_memory)
        kvm_set_shadow_pages(kvm_context, kvm_shadow_memory);

    kvm_msr_list = kvm_get_msr_list(kvm_context);
    if (!kvm_msr_list)
        return -1;
    for (i = 0; i < kvm_msr_list->nmsrs; ++i) {
        if (kvm_msr_list->indices[i] == MSR_STAR)
            kvm_has_msr_star = 1;
        if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA)
            kvm_has_vm_hsave_pa = 1;
    }

    return 0;
}

static void set_msr_entry(struct kvm_msr_entry *entry, uint32_t index,
                          uint64_t data)
{
    entry->index = index;
    entry->data = data;
}
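/* Copy a single MSR value returned by the kernel back into the matching
 * CPUState field; unknown indices are reported and treated as failure. */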
/* returns 0 on success, non-0 on failure */
static int get_msr_entry(struct kvm_msr_entry *entry, CPUState *env)
{
    switch (entry->index) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = entry->data;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = entry->data;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = entry->data;
        break;
    case MSR_STAR:
        env->star = entry->data;
        break;
#ifdef TARGET_X86_64
    case MSR_CSTAR:
        env->cstar = entry->data;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = entry->data;
        break;
    case MSR_FMASK:
        env->fmask = entry->data;
        break;
    case MSR_LSTAR:
        env->lstar = entry->data;
        break;
#endif
    case MSR_IA32_TSC:
        env->tsc = entry->data;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = entry->data;
        break;
    default:
        printf("Warning unknown msr index 0x%x\n", entry->index);
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define MSR_COUNT 9
#else
#define MSR_COUNT 5
#endif
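/* Translate segment descriptors between QEMU's SegmentCache layout and the
 * kvm_segment layout used by the KVM ioctls, in both directions. */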
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
    lhs->unusable = 0;
}

static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
    lhs->unusable = 0;
}

static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}
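/* Push the complete QEMU-side CPU state (GPRs, FPU/SSE, segments, control
 * registers and MSRs) into the in-kernel vcpu via the kvm_set_* helpers. */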
void kvm_arch_load_regs(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[MSR_COUNT];
    int rc, n, i;

    regs.rax = env->regs[R_EAX];
    regs.rbx = env->regs[R_EBX];
    regs.rcx = env->regs[R_ECX];
    regs.rdx = env->regs[R_EDX];
    regs.rsi = env->regs[R_ESI];
    regs.rdi = env->regs[R_EDI];
    regs.rsp = env->regs[R_ESP];
    regs.rbp = env->regs[R_EBP];
#ifdef TARGET_X86_64
    regs.r8 = env->regs[8];
    regs.r9 = env->regs[9];
    regs.r10 = env->regs[10];
    regs.r11 = env->regs[11];
    regs.r12 = env->regs[12];
    regs.r13 = env->regs[13];
    regs.r14 = env->regs[14];
    regs.r15 = env->regs[15];
#endif

    regs.rflags = env->eflags;
    regs.rip = env->eip;

    kvm_set_regs(env->kvm_cpu_state.vcpu_ctx, &regs);

    memset(&fpu, 0, sizeof fpu);
    fpu.fsw = env->fpus & ~(7 << 11);
    fpu.fsw |= (env->fpstt & 7) << 11;
    fpu.fcw = env->fpuc;
    for (i = 0; i < 8; ++i)
        fpu.ftwx |= (!env->fptags[i]) << i;
    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
    memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
    fpu.mxcsr = env->mxcsr;
    kvm_set_fpu(env->kvm_cpu_state.vcpu_ctx, &fpu);

    memcpy(sregs.interrupt_bitmap, env->interrupt_bitmap, sizeof(sregs.interrupt_bitmap));

    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }
    }

    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.cr8 = cpu_get_apic_tpr(env);
    sregs.apic_base = cpu_get_apic_base(env);

    sregs.efer = env->efer;

    kvm_set_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);

    /* msrs */
    n = 0;
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star)
        set_msr_entry(&msrs[n++], MSR_STAR, env->star);
    if (kvm_has_vm_hsave_pa)
        set_msr_entry(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        set_msr_entry(&msrs[n++], MSR_CSTAR, env->cstar);
        set_msr_entry(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
        set_msr_entry(&msrs[n++], MSR_FMASK, env->fmask);
        set_msr_entry(&msrs[n++], MSR_LSTAR, env->lstar);
    }
#endif

    rc = kvm_set_msrs(env->kvm_cpu_state.vcpu_ctx, msrs, n);
    if (rc == -1)
        perror("kvm_set_msrs FAILED");
}
void kvm_load_tsc(CPUState *env)
{
    int rc;
    struct kvm_msr_entry msr;

    set_msr_entry(&msr, MSR_IA32_TSC, env->tsc);

    rc = kvm_set_msrs(env->kvm_cpu_state.vcpu_ctx, &msr, 1);
    if (rc == -1)
        perror("kvm_set_tsc FAILED.\n");
}

void kvm_arch_save_mpstate(CPUState *env)
{
#ifdef KVM_CAP_MP_STATE
    int r;
    struct kvm_mp_state mp_state;

    r = kvm_get_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
    if (r < 0)
        env->mp_state = -1;
    else
        env->mp_state = mp_state.mp_state;
#endif
}

void kvm_arch_load_mpstate(CPUState *env)
{
#ifdef KVM_CAP_MP_STATE
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    /*
     * -1 indicates that the host did not support GET_MP_STATE ioctl,
     * so don't touch it.
     */
    if (env->mp_state != -1)
        kvm_set_mpstate(env->kvm_cpu_state.vcpu_ctx, &mp_state);
#endif
}
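/* Pull the vcpu state back from the kernel into CPUState and recompute the
 * cached hflags (CPL, long mode, CS/SS size, ADDSEG) from the new values. */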
void kvm_arch_save_regs(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[MSR_COUNT];
    uint32_t hflags;
    uint32_t i, n, rc;

    kvm_get_regs(env->kvm_cpu_state.vcpu_ctx, &regs);

    env->regs[R_EAX] = regs.rax;
    env->regs[R_EBX] = regs.rbx;
    env->regs[R_ECX] = regs.rcx;
    env->regs[R_EDX] = regs.rdx;
    env->regs[R_ESI] = regs.rsi;
    env->regs[R_EDI] = regs.rdi;
    env->regs[R_ESP] = regs.rsp;
    env->regs[R_EBP] = regs.rbp;
#ifdef TARGET_X86_64
    env->regs[8] = regs.r8;
    env->regs[9] = regs.r9;
    env->regs[10] = regs.r10;
    env->regs[11] = regs.r11;
    env->regs[12] = regs.r12;
    env->regs[13] = regs.r13;
    env->regs[14] = regs.r14;
    env->regs[15] = regs.r15;
#endif

    env->eflags = regs.rflags;
    env->eip = regs.rip;

    kvm_get_fpu(env->kvm_cpu_state.vcpu_ctx, &fpu);
    env->fpstt = (fpu.fsw >> 11) & 7;
    env->fpus = fpu.fsw;
    env->fpuc = fpu.fcw;
    for (i = 0; i < 8; ++i)
        env->fptags[i] = !((fpu.ftwx >> i) & 1);
    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
    memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs);
    env->mxcsr = fpu.mxcsr;

    kvm_get_sregs(env->kvm_cpu_state.vcpu_ctx, &sregs);

    memcpy(env->interrupt_bitmap, sregs.interrupt_bitmap, sizeof(env->interrupt_bitmap));

    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;

    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);

#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;

    /* msrs */
    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star)
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
    if (kvm_has_vm_hsave_pa)
        msrs[n++].index = MSR_VM_HSAVE_PA;
#ifdef TARGET_X86_64
    if (lm_capable_kernel) {
        msrs[n++].index = MSR_CSTAR;
        msrs[n++].index = MSR_KERNELGSBASE;
        msrs[n++].index = MSR_FMASK;
        msrs[n++].index = MSR_LSTAR;
    }
#endif
    rc = kvm_get_msrs(env->kvm_cpu_state.vcpu_ctx, msrs, n);
    if (rc == -1) {
        perror("kvm_get_msrs FAILED");
    } else {
        n = rc; /* actual number of MSRs */
        for (i = 0; i < n; i++) {
            if (get_msr_entry(&msrs[i], env))
                return;
        }
    }
}
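/* Fill one kvm_cpuid_entry2 by executing CPUID for the given leaf and index
 * on a scratch CPUState. */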
static void do_cpuid_ent(struct kvm_cpuid_entry2 *e, uint32_t function,
                         uint32_t count, CPUState *env)
{
    env->regs[R_EAX] = function;
    env->regs[R_ECX] = count;
    qemu_kvm_cpuid_on_env(env);
    e->function = function;
    e->flags = 0;
    e->index = 0;
    e->eax = env->regs[R_EAX];
    e->ebx = env->regs[R_EBX];
    e->ecx = env->regs[R_ECX];
    e->edx = env->regs[R_EDX];
}
struct kvm_para_features {
    int cap;
    int feature;
} para_features[] = {
#ifdef KVM_CAP_CLOCKSOURCE
    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
#endif
#ifdef KVM_CAP_NOP_IO_DELAY
    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
#endif
#ifdef KVM_CAP_PV_MMU
    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
#endif
#ifdef KVM_CAP_CR3_CACHE
    { KVM_CAP_CR3_CACHE, KVM_FEATURE_CR3_CACHE },
#endif
    { -1, -1 }
};

static int get_para_features(kvm_context_t kvm_context)
{
    int i, features = 0;

    for (i = 0; i < ARRAY_SIZE(para_features) - 1; i++) {
        if (kvm_check_extension(kvm_context, para_features[i].cap))
            features |= (1 << para_features[i].feature);
    }

    return features;
}

static void kvm_trim_features(uint32_t *features, uint32_t supported)
{
    int i;
    uint32_t mask;

    for (i = 0; i < 32; ++i) {
        mask = 1U << i;
        if ((*features & mask) && !(supported & mask)) {
            *features &= ~mask;
        }
    }
}
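/* Build the guest CPUID table: advertise the KVM paravirt signature and
 * feature leaves when available, enumerate the standard and extended leaves
 * (expanding the index-significant ones: 4, 0xb, 0xd), hand the table to the
 * kernel, and trim guest feature bits down to what the host KVM supports. */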
int kvm_arch_qemu_init_env(CPUState *cenv)
{
    struct kvm_cpuid_entry2 cpuid_ent[100];
#ifdef KVM_CPUID_SIGNATURE
    struct kvm_cpuid_entry2 *pv_ent;
    uint32_t signature[3];
#endif
    int cpuid_nent = 0;
    CPUState copy;
    uint32_t i, j, limit;

    qemu_kvm_load_lapic(cenv);

    copy = *cenv;

#ifdef KVM_CPUID_SIGNATURE
    /* Paravirtualization CPUIDs */
    memcpy(signature, "KVMKVMKVM\0\0\0", 12);
    pv_ent = &cpuid_ent[cpuid_nent++];
    memset(pv_ent, 0, sizeof(*pv_ent));
    pv_ent->function = KVM_CPUID_SIGNATURE;
    pv_ent->eax = 0;
    pv_ent->ebx = signature[0];
    pv_ent->ecx = signature[1];
    pv_ent->edx = signature[2];

    pv_ent = &cpuid_ent[cpuid_nent++];
    memset(pv_ent, 0, sizeof(*pv_ent));
    pv_ent->function = KVM_CPUID_FEATURES;
    pv_ent->eax = get_para_features(kvm_context);
#endif

    copy.regs[R_EAX] = 0;
    qemu_kvm_cpuid_on_env(&copy);
    limit = copy.regs[R_EAX];

    for (i = 0; i <= limit; ++i) {
        if (i == 4 || i == 0xb || i == 0xd) {
            for (j = 0; ; ++j) {
                do_cpuid_ent(&cpuid_ent[cpuid_nent], i, j, &copy);

                cpuid_ent[cpuid_nent].flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                cpuid_ent[cpuid_nent].index = j;

                cpuid_nent++;

                if (i == 4 && copy.regs[R_EAX] == 0)
                    break;
                if (i == 0xb && !(copy.regs[R_ECX] & 0xff00))
                    break;
                if (i == 0xd && copy.regs[R_EAX] == 0)
                    break;
            }
        } else
            do_cpuid_ent(&cpuid_ent[cpuid_nent++], i, 0, &copy);
    }

    copy.regs[R_EAX] = 0x80000000;
    qemu_kvm_cpuid_on_env(&copy);
    limit = copy.regs[R_EAX];

    for (i = 0x80000000; i <= limit; ++i)
        do_cpuid_ent(&cpuid_ent[cpuid_nent++], i, 0, &copy);

    kvm_setup_cpuid2(cenv->kvm_cpu_state.vcpu_ctx, cpuid_nent, cpuid_ent);

    kvm_trim_features(&cenv->cpuid_features,
                      kvm_arch_get_supported_cpuid(cenv, 1, R_EDX));
    kvm_trim_features(&cenv->cpuid_ext_features,
                      kvm_arch_get_supported_cpuid(cenv, 1, R_ECX));
    kvm_trim_features(&cenv->cpuid_ext2_features,
                      kvm_arch_get_supported_cpuid(cenv, 0x80000001, R_EDX));
    kvm_trim_features(&cenv->cpuid_ext3_features,
                      kvm_arch_get_supported_cpuid(cenv, 0x80000001, R_ECX));

    return 0;
}
int kvm_arch_halt(void *opaque, kvm_vcpu_context_t vcpu)
{
    CPUState *env = cpu_single_env;

    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
        env->halted = 1;
    }
    return 1;
}

void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
{
    if (!kvm_irqchip_in_kernel(kvm_context))
        kvm_set_cr8(env->kvm_cpu_state.vcpu_ctx, cpu_get_apic_tpr(env));
}

void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
{
    cpu_single_env = env;

    env->eflags = kvm_get_interrupt_flag(env->kvm_cpu_state.vcpu_ctx)
        ? env->eflags | IF_MASK : env->eflags & ~IF_MASK;

    cpu_set_apic_tpr(env, kvm_get_cr8(env->kvm_cpu_state.vcpu_ctx));
    cpu_set_apic_base(env, kvm_get_apic_base(env->kvm_cpu_state.vcpu_ctx));
}

int kvm_arch_has_work(CPUState *env)
{
    if (env->exit_request ||
        ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (env->interrupt_request & CPU_INTERRUPT_NMI))
        return 1;
    return 0;
}
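/* Inject a pending external PIC interrupt if the vcpu is ready to accept
 * one; returns non-zero while an interrupt request remains pending. */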
int kvm_arch_try_push_interrupts(void *opaque)
{
    CPUState *env = cpu_single_env;
    int r, irq;

    if (kvm_is_ready_for_interrupt_injection(env->kvm_cpu_state.vcpu_ctx) &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            r = kvm_inject_irq(env->kvm_cpu_state.vcpu_ctx, irq);
            if (r < 0)
                printf("cpu %d fail inject %x\n", env->cpu_index, irq);
        }
    }

    return (env->interrupt_request & CPU_INTERRUPT_HARD) != 0;
}

#ifdef KVM_CAP_USER_NMI
void kvm_arch_push_nmi(void *opaque)
{
    CPUState *env = cpu_single_env;
    int r;

    if (likely(!(env->interrupt_request & CPU_INTERRUPT_NMI)))
        return;

    env->interrupt_request &= ~CPU_INTERRUPT_NMI;
    r = kvm_inject_nmi(env->kvm_cpu_state.vcpu_ctx);
    if (r < 0)
        printf("cpu %d fail inject NMI\n", env->cpu_index);
}
#endif /* KVM_CAP_USER_NMI */

void kvm_arch_update_regs_for_sipi(CPUState *env)
{
    SegmentCache cs = env->segs[R_CS];

    kvm_arch_save_regs(env);
    env->segs[R_CS] = cs;
    env->eip = 0;
    kvm_arch_load_regs(env);
}

int handle_tpr_access(void *opaque, kvm_vcpu_context_t vcpu,
                      uint64_t rip, int is_write)
{
    kvm_tpr_access_report(cpu_single_env, rip, is_write);
    return 0;
}

void kvm_arch_cpu_reset(CPUState *env)
{
    kvm_arch_load_regs(env);
    if (env->cpu_index != 0) {
        if (kvm_irqchip_in_kernel(kvm_context)) {
#ifdef KVM_CAP_MP_STATE
            kvm_reset_mpstate(env->kvm_cpu_state.vcpu_ctx);
#endif
        } else {
            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
            env->halted = 1;
        }
    }
}
int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3 = 0xcc;

    if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
        cpu_memory_rw_debug(env, bp->pc, &int3, 1, 1))
        return -EINVAL;
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
{
    uint8_t int3;

    if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
        cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
        return -EINVAL;
    return 0;
}
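/* Hardware breakpoint/watchpoint bookkeeping for the gdbstub; up to four
 * entries are mapped onto the x86 debug registers via guest-debug ioctls. */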
#ifdef KVM_CAP_SET_GUEST_DEBUG
static struct {
    target_ulong addr;
    int len;
    int type;
} hw_breakpoint[4];

static int nb_hw_breakpoint;

static int find_hw_breakpoint(target_ulong addr, int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoint; n++)
        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
            (hw_breakpoint[n].len == len || len == -1))
            return n;
    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        len = 1;
        break;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        switch (len) {
        case 1:
            break;
        case 2:
        case 4:
        case 8:
            if (addr & (len - 1))
                return -EINVAL;
            break;
        default:
            return -EINVAL;
        }
        break;
    default:
        return -ENOSYS;
    }

    if (nb_hw_breakpoint == 4)
        return -ENOBUFS;

    if (find_hw_breakpoint(addr, len, type) >= 0)
        return -EEXIST;

    hw_breakpoint[nb_hw_breakpoint].addr = addr;
    hw_breakpoint[nb_hw_breakpoint].len = len;
    hw_breakpoint[nb_hw_breakpoint].type = type;
    nb_hw_breakpoint++;

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
    if (n < 0)
        return -ENOENT;

    nb_hw_breakpoint--;
    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = 0;
}
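/* Decide whether a #DB/#BP exit belongs to the host-side debugger: decode
 * DR6/DR7 to identify single-step, breakpoint or watchpoint hits; anything
 * not handled here is re-injected into the guest. */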
static CPUWatchpoint hw_watchpoint;

int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
{
    int handle = 0;
    int n;

    if (arch_info->exception == 1) {
        if (arch_info->dr6 & (1 << 14)) {
            if (cpu_single_env->singlestep_enabled)
                handle = 1;
        } else {
            for (n = 0; n < 4; n++)
                if (arch_info->dr6 & (1 << n))
                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
                    case 0x0:
                        handle = 1;
                        break;
                    case 0x1:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_WRITE;
                        break;
                    case 0x3:
                        handle = 1;
                        cpu_single_env->watchpoint_hit = &hw_watchpoint;
                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
                        hw_watchpoint.flags = BP_MEM_ACCESS;
                        break;
                    }
        }
    } else if (kvm_find_sw_breakpoint(arch_info->pc))
        handle = 1;

    if (!handle)
        kvm_update_guest_debug(cpu_single_env,
                               (arch_info->exception == 1) ?
                               KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);

    return handle;
}
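/* Encode the active software and hardware breakpoints into the guest-debug
 * control flags, the per-breakpoint debug registers, and the DR7 bit fields
 * before entering the guest. */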
void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
{
    const uint8_t type_code[] = {
        [GDB_BREAKPOINT_HW] = 0x0,
        [GDB_WATCHPOINT_WRITE] = 0x1,
        [GDB_WATCHPOINT_ACCESS] = 0x3
    };
    const uint8_t len_code[] = {
        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
    };
    int n;

    if (!TAILQ_EMPTY(&kvm_sw_breakpoints))
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

    if (nb_hw_breakpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        dbg->arch.debugreg[7] = 0x0600;
        for (n = 0; n < nb_hw_breakpoint; n++) {
            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
                (len_code[hw_breakpoint[n].len] << (18 + n*4));
        }
    }
}
#endif
void kvm_arch_do_ioperm(void *_data)
{
    struct ioperm_data *data = _data;
    ioperm(data->start_port, data->num, data->turn_on);
}
/*
 * Setup x86 specific IRQ routing
 */
int kvm_arch_init_irq_routing(void)
{
    int i, r;

    if (kvm_irqchip && kvm_has_gsi_routing(kvm_context)) {
        kvm_clear_gsi_routes(kvm_context);
        for (i = 0; i < 8; ++i) {
            if (i == 2)
                continue;
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_PIC_MASTER, i);
            if (r < 0)
                return r;
        }
        for (i = 8; i < 16; ++i) {
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_PIC_SLAVE, i - 8);
            if (r < 0)
                return r;
        }
        for (i = 0; i < 24; ++i) {
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_IOAPIC, i);
            if (r < 0)
                return r;
        }
        kvm_commit_irq_routes(kvm_context);
    }
    return 0;
}
uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
                                      int reg)
{
    return kvm_get_supported_cpuid(kvm_context, function, reg);
}