/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    assert(0);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
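
/*
 * Note on segment attributes: the VMCB "attrib" field packs the descriptor
 * attributes into 12 bits -- bits 7:0 hold the access byte (type, S, DPL, P)
 * and bits 11:8 hold the flags nibble (AVL, L, D/B, G).  QEMU's
 * SegmentCache.flags keeps the same bits at descriptor positions 8-15 and
 * 20-23, which is what the shifts in svm_save_seg()/svm_load_seg() below
 * convert between.
 */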

static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
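
/*
 * VMRUN: the current (host) state is saved into the host save area
 * (env->vm_hsave), the guest state and intercept bitmaps are loaded from
 * the VMCB addressed by rAX, HF_GUEST_MASK is set, and any pending event
 * described by EVENTINJ is injected before entering the guest.
 */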
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));

    env->nested_pg_mode = 0;

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        if (env->cr[4] & CR4_PAE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PAE;
        }
        if (env->cr[4] & CR4_PSE_MASK) {
            env->nested_pg_mode |= SVM_NPT_PSE;
        }
        if (env->hflags & HF_LMA_MASK) {
            env->nested_pg_mode |= SVM_NPT_LMA;
        }
        if (env->efer & MSR_EFER_NXE) {
            env->nested_pg_mode |= SVM_NPT_NXE;
        }
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                    env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }
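
    /*
     * EVENTINJ layout (decoded below): bits 7:0 hold the vector, bits 10:8
     * the event type (external interrupt, NMI, exception or software
     * interrupt), bit 11 says whether an error code is pushed, and bit 31
     * marks the field as valid.  The error code itself lives in the
     * separate event_inj_err field.
     */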
    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}
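
/*
 * VMLOAD/VMSAVE transfer the state that VMRUN/#VMEXIT deliberately leave
 * alone: the FS, GS, TR and LDTR segments (including their hidden parts)
 * and the KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and SYSENTER MSRs, between
 * the CPU and the VMCB addressed by rAX.
 */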
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}
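
/*
 * Check whether the current instruction or event is intercepted by the
 * guest's VMCB.  The intercept bitmaps were cached in env->intercept* at
 * VMRUN time, so only the MSR and I/O permission maps still require a
 * VMCB lookup here.
 */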
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
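    /*
     * The MSR permission map holds two bits per MSR (read intercept, write
     * intercept) in three 2K chunks covering MSRs 0x0-0x1fff,
     * 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  Below, t1 is the
     * byte offset into the map and t0 the bit position within that byte;
     * param selects the read (0) or write (1) bit of the pair.
     */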
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}
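
/*
 * I/O intercepts use the I/O permission map: one bit per port number.
 * param is in EXITINFO1 format, so bits 6:4 encode the access size in
 * bytes, and the derived mask covers every port the access touches.
 */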
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}
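
/*
 * cpu_vmexit() does not switch back to the host state itself: it records
 * the exit code, raises EXCP_VMEXIT + exit_code and leaves the translated
 * code via cpu_loop_exit().  The actual world switch is then performed by
 * do_vmexit() below, invoked from the exception handling path.
 */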
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}
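
/*
 * do_vmexit() is the inverse of VMRUN: the guest state is written back to
 * the VMCB (along with the exit code and exit information), the cached
 * intercepts are dropped, and the host state is reloaded from the hsave
 * page.
 */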
void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}

#endif