target/i386/tcg/svm_helper.c
/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "helper-tcg.h"

/* Secure Virtual Machine helpers */
#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    assert(0);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
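
/*
 * A VMCB segment descriptor stores its attributes in a packed 12-bit
 * "attrib" field: bits 7:0 carry type/S/DPL/P (SegmentCache flags bits
 * 15:8) and bits 11:8 carry AVL/L/D-B/G (flags bits 23:20).  The two
 * helpers below convert between that layout and QEMU's SegmentCache.
 */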
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
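
/*
 * Like svm_load_seg(), but also commit the segment to the live segment
 * registers via cpu_x86_load_seg_cache(), which refreshes the hflags
 * derived from CS/SS (CPL, code size, ...).
 */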
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
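
/*
 * VMRUN: save the host state into the hsave area, cache the intercept
 * bitmaps in CPUX86State, load the guest state from the VMCB whose
 * physical address is in rAX, then inject any event that the hypervisor
 * queued in EVENTINJ.  aflag == 2 means the instruction was executed
 * with a 64-bit address size, so rAX is used as-is; otherwise only its
 * low 32 bits form the VMCB address.
 */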
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
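
    /*
     * With nested paging enabled, remember the nested CR3 and the
     * paging-mode bits (PAE/PSE/LMA/NXE) that the second-level page
     * walk will need.
     */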
    env->nested_pg_mode = 0;

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                       env->vm_vmcb + offsetof(struct vmcb,
                                                               control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        if (env->cr[4] & CR4_PAE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PAE;
        }
        if (env->cr[4] & CR4_PSE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PSE;
        }
        if (env->hflags & HF_LMA_MASK) {
            env->nested_pg_mode |= SVM_NPT_LMA;
        }
        if (env->efer & MSR_EFER_NXE) {
            env->nested_pg_mode |= SVM_NPT_NXE;
        }
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
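
    /*
     * When V_INTR_MASKING is set, the guest gets its own virtual TPR
     * and the host's EFLAGS.IF (remembered in HF2_HIF_MASK) keeps
     * governing physical interrupt delivery.
     */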
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }
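
    /*
     * EVENTINJ layout: vector in bits 7:0, event type in bits 10:8,
     * an error-code-valid bit and the VALID bit; the error code itself
     * lives in event_inj_err.
     */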
    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}
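
/*
 * VMLOAD/VMSAVE transfer the processor state that VMRUN and #VMEXIT do
 * not switch: FS, GS, TR and LDTR plus KernelGSBase, STAR/LSTAR/CSTAR/
 * SFMASK and the SYSENTER MSRs.  The VMCB address is taken from rAX,
 * truncated to 32 bits unless the address size is 64 bits (aflag == 2).
 */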
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}
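
/* VMSAVE: mirror of helper_vmload(), storing the same state back into
   the VMCB addressed by rAX. */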
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}
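
/* STGI/CLGI set and clear the global interrupt flag, tracked here in
   HF2_GIF_MASK; interrupts are only delivered to the guest while GIF
   is set. */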
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}
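
/*
 * Trigger a #VMEXIT if the operation described by "type" is intercepted.
 * Most intercept bits were cached in CPUX86State by helper_vmrun(); only
 * the MSR case still consults its permission map in guest memory (I/O
 * port intercepts are handled separately in helper_svm_check_io()).
 */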
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;
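
            /*
             * The MSR permission map holds two bits per MSR (read
             * intercept, then write intercept).  MSRs 0..0x1fff,
             * 0xc0000000..0xc0001fff and 0xc0010000..0xc0011fff occupy
             * consecutive 2KB slices of the map; t1 is the byte offset
             * of the pair and t0 its bit offset, with param selecting
             * the read (0) or write (1) bit.
             */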
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}
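
/*
 * I/O port intercepts use a separate permission map with one bit per
 * port.  Bits 6:4 of "param" yield the access width in bytes, so "mask"
 * covers every port byte touched; on a hit the port number is reported
 * in bits 31:16 of exit_info_1, alongside the original param bits.
 */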
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}
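
/*
 * cpu_vmexit() only records the exit code and unwinds to the CPU loop
 * by raising the EXCP_VMEXIT pseudo-exception; the actual switch back
 * to host state happens later in do_vmexit().
 */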
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}
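
/*
 * Perform the real #VMEXIT: store the guest state (including its
 * interrupt shadow and virtual interrupt bits) into the VMCB, record
 * the exit code and exit information, then restore the host state that
 * VMRUN saved in the hsave area.
 */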
void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}

#endif