/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
/* Secure Virtual Machine helpers */
#if defined(CONFIG_USER_ONLY)
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
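/*
 * System-emulation implementations.  The VMCB and the host save area
 * live in guest physical memory, so all state is transferred with the
 * x86_ld*_phys()/x86_st*_phys() accessors.
 */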
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
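/*
 * Note: QEMU caches segment attributes in descriptor-flag layout
 * (bits 8..15 and 20..23 of sc->flags), while the VMCB packs them
 * into a 12-bit attrib field, so the helpers above and below
 * translate between the two layouts:
 *
 *   attrib = ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
 *   flags  = ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
 */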
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
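/*
 * VMRUN: save the current (host) state to the host save area at
 * env->vm_hsave, load the guest state from the VMCB whose physical
 * address is in rAX, cache the intercept bitmaps, set GIF, and
 * optionally inject the event pending in control.event_inj.
 */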
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);
    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb,
                                            control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                             control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb,
                                            control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                             control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.intercept_exceptions));
    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb,
                                                   control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb,
                                                       save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb,
                                                            save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb,
                                                            save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb,
                                                  control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }
    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}
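/*
 * VMMCALL exists to be intercepted: if the check below does not
 * trigger a #VMEXIT (cpu_vmexit() never returns), the instruction
 * raises #UD just like real hardware without a handler.
 */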
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb,
                                                    save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}
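/* VMSAVE is the mirror image of VMLOAD: it stores the same hidden
   processor state into the VMCB at the physical address in rAX. */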
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb,
                                                     save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs),
                 env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}
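/*
 * STGI/CLGI set and clear the global interrupt flag (GIF), tracked in
 * hflags2; while GIF is clear, interrupts and NMIs are held pending.
 */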
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}
void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}
void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}
void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}
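/*
 * Intercept dispatch.  CR/DR accesses and exceptions are bit-indexed
 * relative to their base exit codes in the bitmaps cached at VMRUN
 * time.  MSR accesses consult the MSR permission map: two bits per
 * MSR (read, then write), in three 2K regions covering the 0x0,
 * 0xc0000000 and 0xc0010000 MSR ranges.  Everything else is a single
 * bit in the 64-bit intercept word, indexed relative to SVM_EXIT_INTR.
 */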
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb,
                                          control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}
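/*
 * I/O intercept: the I/O permission map has one bit per port.  Bits
 * 4..6 of param encode the access size, so mask ends up with one bit
 * set per byte accessed, starting at the bit for the first port.
 */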
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb,
                                                 control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}
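/*
 * #VMEXIT: write the guest state, exit code and exit information back
 * into the VMCB, reload the host state from the host save area, clear
 * GIF, and hand control back to the host; the final cpu_loop_exit()
 * means this function never returns to its caller.
 */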
/* Note: currently only 32 bits of exit_code are used */
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     0);
    }
    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb,
                                                   control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                 int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);
    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;
    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);
    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));
    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb,
                                         control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}
#endif