qemu/ar7.git: target-i386/svm_helper.c
/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
    abort();
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    abort();
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
    abort();
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

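/*
 * Helpers to copy segment descriptors between QEMU's SegmentCache and the
 * VMCB segment layout.  The VMCB "attrib" word is a compressed form of the
 * descriptor attributes: bits 0-7 hold the type/S/DPL/P byte and bits 8-11
 * hold AVL/L/D/G, so svm_save_seg() and svm_load_seg() shift between that
 * layout and the one used in SegmentCache.flags.
 */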
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

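/*
 * VMRUN: save the host state into the hsave area, load the guest state and
 * intercept bitmaps from the VMCB pointed to by rAX, and optionally inject
 * the event described by the VMCB's EVENTINJ field before resuming the
 * guest.
 */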
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb +
                                  offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                  offsetof(struct vmcb,
                                           control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                  offsetof(struct vmcb,
                                           control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                  offsetof(struct vmcb,
                                           control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                  offsetof(struct vmcb,
                                           control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                  offsetof(struct vmcb,
                                           control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb +
                                  offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb +
                                  offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb, save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                             offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

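/*
 * VMLOAD/VMSAVE transfer the additional guest state that VMRUN/#VMEXIT do
 * not touch: FS, GS, TR and LDTR, the STAR and SYSENTER MSRs, and on 64-bit
 * targets KernelGSBase, LSTAR, CSTAR and SFMASK.
 */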
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void QEMU_NORETURN helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

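/*
 * Check whether an intercept is enabled for the given exit code and, if so,
 * raise #VMEXIT.  For SVM_EXIT_MSR the MSR permission map is consulted: it
 * covers the ranges 0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff
 * with two bits per MSR (read and write), so a bit offset is derived from
 * ECX before testing the corresponding byte of the bitmap at msrpm_base_pa.
 */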
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

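/*
 * I/O intercept check: the I/O permission map has one bit per port.  All
 * bits covered by the access (the size in bytes is encoded in bits 4-6 of
 * param) must be clear for the access to proceed; otherwise a #VMEXIT with
 * code SVM_EXIT_IOIO is raised, with the port number placed in bits 16-31
 * of exit_info_1.
 */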
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

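/*
 * #VMEXIT: store the current guest state and the exit code/information back
 * into the VMCB, then reload the host state that VMRUN stashed in the hsave
 * area and return to the host with GIF cleared.
 */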
/* Note: currently only 32 bits of exit_code are used */
void QEMU_NORETURN helper_vmexit(CPUX86State *env, uint32_t exit_code,
                                 uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave +
                                 offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave +
                                  offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave +
                                 offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave +
                                  offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs, env->vm_hsave +
                                         offsetof(struct vmcb, save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs, env->vm_hsave +
                                         offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs, env->vm_hsave +
                                         offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

void QEMU_NORETURN cpu_vmexit(CPUX86State *env, uint32_t exit_code,
                              uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif