/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
/* Secure Virtual Machine helpers */
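
/* The VMCB and the host save area live in guest-physical memory, so all
   state is copied in and out with the x86_*_phys accessors rather than
   through the virtual address space.  With CONFIG_USER_ONLY there is no
   system memory to back a VMCB, so the helpers below reduce to empty
   stubs. */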
#if defined(CONFIG_USER_ONLY)
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
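
/* A vmcb_seg packs the descriptor attributes into a 12-bit field: bits
   0-7 hold flags bits 8-15 (type, S, DPL, P) and bits 8-11 hold flags
   bits 20-23 (AVL, L, D/B, G).  svm_save_seg and svm_load_seg convert
   between that packed form and the flat 'flags' word of SegmentCache. */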
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
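
/* VMRUN: save the current (host) state into the host save area at
   env->vm_hsave, cache the intercept configuration in env, load the
   guest state from the VMCB whose address is taken from RAX, and
   finally inject any event pending in control.event_inj. */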
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}
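
/* VMLOAD/VMSAVE handle the state that VMRUN/#VMEXIT leave untouched:
   FS, GS, TR and LDTR, the SYSENTER MSRs, STAR and, on 64-bit targets,
   KERNEL_GS_BASE, LSTAR, CSTAR and SFMASK.  The VMCB address comes from
   RAX, truncated to 32 bits unless the address size is 64-bit
   (aflag == 2). */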
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs),
                 env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}
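
/* Generic intercept check.  The main intercept vector and the per-class
   bitmaps (CR/DR accesses, exceptions) were cached in env by
   helper_vmrun, so most checks never touch the VMCB; only the MSR and
   I/O permission maps are still read from guest memory here. */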
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
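    /* RDMSR/WRMSR intercepts are driven by the MSR permission map: two
       bits per MSR (read, then write), in three 2-Kbyte regions covering
       MSRs 0000_0000-0000_1FFF, C000_0000-C000_1FFF and
       C001_0000-C001_1FFF.  t1 is the byte offset into the map, t0 the
       bit offset within that byte, and 'param' selects the read (0) or
       write (1) bit. */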
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}
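
/* IN/OUT intercepts use the I/O permission map at control.iopm_base_pa,
   one bit per port.  'mask' covers the access size encoded in bits 4-6
   of 'param', so an access that overlaps an intercepted port is caught
   too.  On a hit, the address of the next instruction is stored in
   exit_info_2 before the #VMEXIT is raised. */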
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}
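
/* cpu_vmexit only records the exit code and unwinds to the main loop;
   the actual world switch happens in do_vmexit: the guest state is
   written back to the VMCB together with the exit information, and the
   host state saved by VMRUN is reloaded from env->vm_hsave. */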
void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}

#endif