[qemu/ar7.git] / target-i386 / svm_helper.c
/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

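/*
 * The VMCB "attrib" word packs the segment attribute bits into 12 bits,
 * whereas QEMU keeps them expanded inside SegmentCache.flags; the two
 * helpers below convert between the two layouts while copying a segment
 * register to or from guest memory.
 */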
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

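/*
 * VMRUN: save the host state into the hsave area, cache the intercept
 * bitmaps so they need not be re-read from the VMCB while in SVM mode,
 * load the guest state from the VMCB addressed by RAX/EAX, and finally
 * inject any event described by control.event_inj.
 */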
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if the flush is needed at all */
    tlb_flush_page(CPU(cpu), addr);
}

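/*
 * Test whether the intercept for "type" was enabled in the bitmaps cached
 * at VMRUN time; if it was, raise a #VMEXIT with exit_info_1 set to
 * "param".
 */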
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

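            /*
             * The MSR permission bitmap keeps two bits (read/write) per
             * MSR in three 2KB blocks covering MSRs 0x0-0x1fff,
             * 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  Compute
             * t1, the byte offset into the bitmap, and t0, the bit offset
             * within that byte, for the MSR selected by ECX.
             */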
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

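/*
 * I/O intercepts are decided by the I/O permission bitmap, one bit per
 * port: the mask derived from the size bits of "param" covers every port
 * byte touched by the access, and any set bit forces a #VMEXIT with the
 * port number recorded in exit_info_1.
 */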
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

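/*
 * #VMEXIT: store the guest state back into the VMCB, record the exit code
 * and exit information, reload the host state from the hsave area and
 * return to the host with GIF cleared.
 */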
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif