/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)
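
/*
 * In user-mode emulation there is no system state to virtualize, so the
 * SVM helpers are empty stubs here; the real implementations follow in
 * the full-system branch below.
 */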
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}

#else
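
/*
 * svm_save_seg() copies a QEMU SegmentCache into a vmcb_seg in guest
 * physical memory: selector, base and limit are stored as-is, and the
 * descriptor flags are repacked into the VMCB 12-bit "attrib" format.
 */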
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
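
/*
 * svm_load_seg() is the inverse: it reads a vmcb_seg from guest physical
 * memory and expands the packed attrib field back into QEMU's segment
 * flags layout.
 */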
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
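
/*
 * svm_load_seg_cache() loads a segment from the VMCB and commits it to the
 * CPU state through cpu_x86_load_seg_cache(), so the hidden segment state
 * and the derived hflags are updated consistently.
 */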
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
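
/*
 * VMRUN: the current (host) state is saved to the host save area
 * (env->vm_hsave), the guest state and intercept bitmaps are loaded from
 * the VMCB addressed by rAX, GIF is set, and any event pending in the
 * EVENTINJ field is injected before the guest starts executing.
 */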
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}
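
/*
 * VMLOAD: load the FS, GS, TR and LDTR segments plus the SYSCALL/SYSENTER
 * MSR state from the VMCB addressed by rAX into the CPU.
 */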
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}
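
/*
 * VMSAVE: the inverse of VMLOAD, writing the same segment and MSR state
 * from the CPU back into the VMCB addressed by rAX.
 */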
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}
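
/*
 * INVLPGA: flush the page at the virtual address in rAX. The ASID is
 * ignored here, so the flush can be broader than strictly required.
 */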
void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}
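
/*
 * cpu_svm_check_intercept_param() is the central intercept check: when a
 * guest is running, it consults the intercept bitmaps cached at VMRUN time
 * (and, for MSR accesses, the MSR permission map in guest memory) and
 * triggers a #VMEXIT through cpu_vmexit() if the event is intercepted.
 */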
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}
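
/*
 * I/O intercept check: consult the I/O permission map (iopm_base_pa) for
 * the accessed port and, if intercepted, record the address of the next
 * instruction in exit_info_2 and exit with SVM_EXIT_IOIO.
 */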
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip is saved in exit_info_2 */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}
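
/*
 * cpu_vmexit() starts a #VMEXIT: it restores precise CPU state for the
 * faulting instruction, records the exit code and exit_info_1, and raises
 * EXCP_VMEXIT so the main loop performs the actual world switch in
 * do_vmexit().
 */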
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}
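
/*
 * do_vmexit() performs the world switch back to the host: guest state is
 * written to the VMCB together with the exit code and interrupt state,
 * intercepts are disabled, host state is reloaded from the hsave area and
 * GIF is cleared.
 */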
void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}

#endif