/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
/* Secure Virtual Machine helpers */
28 #if defined(CONFIG_USER_ONLY)
30 void helper_vmrun(CPUX86State
*env
, int aflag
, int next_eip_addend
)
34 void helper_vmmcall(CPUX86State
*env
)
38 void helper_vmload(CPUX86State
*env
, int aflag
)
42 void helper_vmsave(CPUX86State
*env
, int aflag
)
46 void helper_stgi(CPUX86State
*env
)
50 void helper_clgi(CPUX86State
*env
)
54 void helper_skinit(CPUX86State
*env
)
58 void helper_invlpga(CPUX86State
*env
, int aflag
)
62 void cpu_vmexit(CPUX86State
*nenv
, uint32_t exit_code
, uint64_t exit_info_1
,
68 void helper_svm_check_intercept_param(CPUX86State
*env
, uint32_t type
,
73 void cpu_svm_check_intercept_param(CPUX86State
*env
, uint32_t type
,
74 uint64_t param
, uintptr_t retaddr
)
78 void helper_svm_check_io(CPUX86State
*env
, uint32_t port
, uint32_t param
,
79 uint32_t next_eip_addend
)
84 static inline void svm_save_seg(CPUX86State
*env
, hwaddr addr
,
85 const SegmentCache
*sc
)
87 CPUState
*cs
= env_cpu(env
);
89 x86_stw_phys(cs
, addr
+ offsetof(struct vmcb_seg
, selector
),
91 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb_seg
, base
),
93 x86_stl_phys(cs
, addr
+ offsetof(struct vmcb_seg
, limit
),
95 x86_stw_phys(cs
, addr
+ offsetof(struct vmcb_seg
, attrib
),
96 ((sc
->flags
>> 8) & 0xff) | ((sc
->flags
>> 12) & 0x0f00));
99 static inline void svm_load_seg(CPUX86State
*env
, hwaddr addr
,
102 CPUState
*cs
= env_cpu(env
);
105 sc
->selector
= x86_lduw_phys(cs
,
106 addr
+ offsetof(struct vmcb_seg
, selector
));
107 sc
->base
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb_seg
, base
));
108 sc
->limit
= x86_ldl_phys(cs
, addr
+ offsetof(struct vmcb_seg
, limit
));
109 flags
= x86_lduw_phys(cs
, addr
+ offsetof(struct vmcb_seg
, attrib
));
110 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
113 static inline void svm_load_seg_cache(CPUX86State
*env
, hwaddr addr
,
116 SegmentCache sc1
, *sc
= &sc1
;
118 svm_load_seg(env
, addr
, sc
);
119 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
120 sc
->base
, sc
->limit
, sc
->flags
);
123 void helper_vmrun(CPUX86State
*env
, int aflag
, int next_eip_addend
)
125 CPUState
*cs
= env_cpu(env
);
131 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMRUN
, 0, GETPC());
134 addr
= env
->regs
[R_EAX
];
136 addr
= (uint32_t)env
->regs
[R_EAX
];
139 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
143 /* save the current CPU state in the hsave page */
144 x86_stq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
),
146 x86_stl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
),
149 x86_stq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
),
151 x86_stl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
),
155 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
157 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
159 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
161 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
163 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
165 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
168 env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
170 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
),
171 cpu_compute_eflags(env
));
173 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
175 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
177 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
179 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
182 x86_stq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
183 env
->eip
+ next_eip_addend
);
185 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), env
->regs
[R_ESP
]);
187 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), env
->regs
[R_EAX
]);
189 /* load the interception bitmaps so we do not need to access the
191 env
->intercept
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
193 env
->intercept_cr_read
= x86_lduw_phys(cs
, env
->vm_vmcb
+
194 offsetof(struct vmcb
,
195 control
.intercept_cr_read
));
196 env
->intercept_cr_write
= x86_lduw_phys(cs
, env
->vm_vmcb
+
197 offsetof(struct vmcb
,
198 control
.intercept_cr_write
));
199 env
->intercept_dr_read
= x86_lduw_phys(cs
, env
->vm_vmcb
+
200 offsetof(struct vmcb
,
201 control
.intercept_dr_read
));
202 env
->intercept_dr_write
= x86_lduw_phys(cs
, env
->vm_vmcb
+
203 offsetof(struct vmcb
,
204 control
.intercept_dr_write
));
205 env
->intercept_exceptions
= x86_ldl_phys(cs
, env
->vm_vmcb
+
206 offsetof(struct vmcb
,
207 control
.intercept_exceptions
210 nested_ctl
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
211 control
.nested_ctl
));
213 env
->nested_pg_mode
= 0;
215 if (nested_ctl
& SVM_NPT_ENABLED
) {
216 env
->nested_cr3
= x86_ldq_phys(cs
,
217 env
->vm_vmcb
+ offsetof(struct vmcb
,
218 control
.nested_cr3
));
219 env
->hflags2
|= HF2_NPT_MASK
;
221 if (env
->cr
[4] & CR4_PAE_MASK
) {
222 env
->nested_pg_mode
|= SVM_NPT_PAE
;
224 if (env
->cr
[4] & CR4_PSE_MASK
) {
225 env
->nested_pg_mode
|= SVM_NPT_PSE
;
227 if (env
->hflags
& HF_LMA_MASK
) {
228 env
->nested_pg_mode
|= SVM_NPT_LMA
;
230 if (env
->efer
& MSR_EFER_NXE
) {
231 env
->nested_pg_mode
|= SVM_NPT_NXE
;
235 /* enable intercepts */
236 env
->hflags
|= HF_GUEST_MASK
;
238 env
->tsc_offset
= x86_ldq_phys(cs
, env
->vm_vmcb
+
239 offsetof(struct vmcb
, control
.tsc_offset
));
241 env
->gdt
.base
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
243 env
->gdt
.limit
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
246 env
->idt
.base
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
248 env
->idt
.limit
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
251 /* clear exit_info_2 so we behave like the real hardware */
253 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
255 cpu_x86_update_cr0(env
, x86_ldq_phys(cs
,
256 env
->vm_vmcb
+ offsetof(struct vmcb
,
258 cpu_x86_update_cr4(env
, x86_ldq_phys(cs
,
259 env
->vm_vmcb
+ offsetof(struct vmcb
,
261 cpu_x86_update_cr3(env
, x86_ldq_phys(cs
,
262 env
->vm_vmcb
+ offsetof(struct vmcb
,
264 env
->cr
[2] = x86_ldq_phys(cs
,
265 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
266 int_ctl
= x86_ldl_phys(cs
,
267 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
268 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
269 if (int_ctl
& V_INTR_MASKING_MASK
) {
270 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
271 env
->hflags2
|= HF2_VINTR_MASK
;
272 if (env
->eflags
& IF_MASK
) {
273 env
->hflags2
|= HF2_HIF_MASK
;
279 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
281 cpu_load_eflags(env
, x86_ldq_phys(cs
,
282 env
->vm_vmcb
+ offsetof(struct vmcb
,
284 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
286 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
288 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
290 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
292 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
295 env
->eip
= x86_ldq_phys(cs
,
296 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
298 env
->regs
[R_ESP
] = x86_ldq_phys(cs
,
299 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
300 env
->regs
[R_EAX
] = x86_ldq_phys(cs
,
301 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
302 env
->dr
[7] = x86_ldq_phys(cs
,
303 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
304 env
->dr
[6] = x86_ldq_phys(cs
,
305 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
307 /* FIXME: guest state consistency checks */
309 switch (x86_ldub_phys(cs
,
310 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
311 case TLB_CONTROL_DO_NOTHING
:
313 case TLB_CONTROL_FLUSH_ALL_ASID
:
314 /* FIXME: this is not 100% correct but should work for now */
319 env
->hflags2
|= HF2_GIF_MASK
;
321 if (int_ctl
& V_IRQ_MASK
) {
322 CPUState
*cs
= env_cpu(env
);
324 cs
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
327 /* maybe we need to inject an event */
328 event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
330 if (event_inj
& SVM_EVTINJ_VALID
) {
331 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
332 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
333 uint32_t event_inj_err
= x86_ldl_phys(cs
, env
->vm_vmcb
+
334 offsetof(struct vmcb
,
335 control
.event_inj_err
));
337 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Injecting(%#hx): ", valid_err
);
338 /* FIXME: need to implement valid_err */
339 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
340 case SVM_EVTINJ_TYPE_INTR
:
341 cs
->exception_index
= vector
;
342 env
->error_code
= event_inj_err
;
343 env
->exception_is_int
= 0;
344 env
->exception_next_eip
= -1;
345 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "INTR");
346 /* XXX: is it always correct? */
347 do_interrupt_x86_hardirq(env
, vector
, 1);
349 case SVM_EVTINJ_TYPE_NMI
:
350 cs
->exception_index
= EXCP02_NMI
;
351 env
->error_code
= event_inj_err
;
352 env
->exception_is_int
= 0;
353 env
->exception_next_eip
= env
->eip
;
354 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "NMI");
357 case SVM_EVTINJ_TYPE_EXEPT
:
358 cs
->exception_index
= vector
;
359 env
->error_code
= event_inj_err
;
360 env
->exception_is_int
= 0;
361 env
->exception_next_eip
= -1;
362 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "EXEPT");
365 case SVM_EVTINJ_TYPE_SOFT
:
366 cs
->exception_index
= vector
;
367 env
->error_code
= event_inj_err
;
368 env
->exception_is_int
= 1;
369 env
->exception_next_eip
= env
->eip
;
370 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "SOFT");
374 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %#x %#x\n", cs
->exception_index
,
379 void helper_vmmcall(CPUX86State
*env
)
381 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMMCALL
, 0, GETPC());
382 raise_exception(env
, EXCP06_ILLOP
);
385 void helper_vmload(CPUX86State
*env
, int aflag
)
387 CPUState
*cs
= env_cpu(env
);
390 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMLOAD
, 0, GETPC());
393 addr
= env
->regs
[R_EAX
];
395 addr
= (uint32_t)env
->regs
[R_EAX
];
398 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmload! " TARGET_FMT_lx
399 "\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
400 addr
, x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
402 env
->segs
[R_FS
].base
);
404 svm_load_seg_cache(env
, addr
+ offsetof(struct vmcb
, save
.fs
), R_FS
);
405 svm_load_seg_cache(env
, addr
+ offsetof(struct vmcb
, save
.gs
), R_GS
);
406 svm_load_seg(env
, addr
+ offsetof(struct vmcb
, save
.tr
), &env
->tr
);
407 svm_load_seg(env
, addr
+ offsetof(struct vmcb
, save
.ldtr
), &env
->ldt
);
410 env
->kernelgsbase
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
411 save
.kernel_gs_base
));
412 env
->lstar
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.lstar
));
413 env
->cstar
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.cstar
));
414 env
->fmask
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sfmask
));
416 env
->star
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.star
));
417 env
->sysenter_cs
= x86_ldq_phys(cs
,
418 addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
419 env
->sysenter_esp
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
421 env
->sysenter_eip
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
425 void helper_vmsave(CPUX86State
*env
, int aflag
)
427 CPUState
*cs
= env_cpu(env
);
430 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMSAVE
, 0, GETPC());
433 addr
= env
->regs
[R_EAX
];
435 addr
= (uint32_t)env
->regs
[R_EAX
];
438 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmsave! " TARGET_FMT_lx
439 "\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
440 addr
, x86_ldq_phys(cs
,
441 addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
442 env
->segs
[R_FS
].base
);
444 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.fs
),
446 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.gs
),
448 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.tr
),
450 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.ldtr
),
454 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
),
456 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
457 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
458 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
460 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
462 addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
463 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sysenter_esp
),
465 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sysenter_eip
),
469 void helper_stgi(CPUX86State
*env
)
471 cpu_svm_check_intercept_param(env
, SVM_EXIT_STGI
, 0, GETPC());
472 env
->hflags2
|= HF2_GIF_MASK
;
475 void helper_clgi(CPUX86State
*env
)
477 cpu_svm_check_intercept_param(env
, SVM_EXIT_CLGI
, 0, GETPC());
478 env
->hflags2
&= ~HF2_GIF_MASK
;
481 void helper_skinit(CPUX86State
*env
)
483 cpu_svm_check_intercept_param(env
, SVM_EXIT_SKINIT
, 0, GETPC());
484 /* XXX: not implemented */
485 raise_exception(env
, EXCP06_ILLOP
);
488 void helper_invlpga(CPUX86State
*env
, int aflag
)
490 X86CPU
*cpu
= env_archcpu(env
);
493 cpu_svm_check_intercept_param(env
, SVM_EXIT_INVLPGA
, 0, GETPC());
496 addr
= env
->regs
[R_EAX
];
498 addr
= (uint32_t)env
->regs
[R_EAX
];
501 /* XXX: could use the ASID to see if it is needed to do the
503 tlb_flush_page(CPU(cpu
), addr
);
506 void cpu_svm_check_intercept_param(CPUX86State
*env
, uint32_t type
,
507 uint64_t param
, uintptr_t retaddr
)
509 CPUState
*cs
= env_cpu(env
);
511 if (likely(!(env
->hflags
& HF_GUEST_MASK
))) {
515 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
516 if (env
->intercept_cr_read
& (1 << (type
- SVM_EXIT_READ_CR0
))) {
517 cpu_vmexit(env
, type
, param
, retaddr
);
520 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
521 if (env
->intercept_cr_write
& (1 << (type
- SVM_EXIT_WRITE_CR0
))) {
522 cpu_vmexit(env
, type
, param
, retaddr
);
525 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 7:
526 if (env
->intercept_dr_read
& (1 << (type
- SVM_EXIT_READ_DR0
))) {
527 cpu_vmexit(env
, type
, param
, retaddr
);
530 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 7:
531 if (env
->intercept_dr_write
& (1 << (type
- SVM_EXIT_WRITE_DR0
))) {
532 cpu_vmexit(env
, type
, param
, retaddr
);
535 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 31:
536 if (env
->intercept_exceptions
& (1 << (type
- SVM_EXIT_EXCP_BASE
))) {
537 cpu_vmexit(env
, type
, param
, retaddr
);
541 if (env
->intercept
& (1ULL << (SVM_EXIT_MSR
- SVM_EXIT_INTR
))) {
542 /* FIXME: this should be read in at vmrun (faster this way?) */
543 uint64_t addr
= x86_ldq_phys(cs
, env
->vm_vmcb
+
544 offsetof(struct vmcb
,
545 control
.msrpm_base_pa
));
548 switch ((uint32_t)env
->regs
[R_ECX
]) {
550 t0
= (env
->regs
[R_ECX
] * 2) % 8;
551 t1
= (env
->regs
[R_ECX
] * 2) / 8;
553 case 0xc0000000 ... 0xc0001fff:
554 t0
= (8192 + env
->regs
[R_ECX
] - 0xc0000000) * 2;
558 case 0xc0010000 ... 0xc0011fff:
559 t0
= (16384 + env
->regs
[R_ECX
] - 0xc0010000) * 2;
564 cpu_vmexit(env
, type
, param
, retaddr
);
569 if (x86_ldub_phys(cs
, addr
+ t1
) & ((1 << param
) << t0
)) {
570 cpu_vmexit(env
, type
, param
, retaddr
);
575 if (env
->intercept
& (1ULL << (type
- SVM_EXIT_INTR
))) {
576 cpu_vmexit(env
, type
, param
, retaddr
);
582 void helper_svm_check_intercept_param(CPUX86State
*env
, uint32_t type
,
585 cpu_svm_check_intercept_param(env
, type
, param
, GETPC());
588 void helper_svm_check_io(CPUX86State
*env
, uint32_t port
, uint32_t param
,
589 uint32_t next_eip_addend
)
591 CPUState
*cs
= env_cpu(env
);
593 if (env
->intercept
& (1ULL << (SVM_EXIT_IOIO
- SVM_EXIT_INTR
))) {
594 /* FIXME: this should be read in at vmrun (faster this way?) */
595 uint64_t addr
= x86_ldq_phys(cs
, env
->vm_vmcb
+
596 offsetof(struct vmcb
, control
.iopm_base_pa
));
597 uint16_t mask
= (1 << ((param
>> 4) & 7)) - 1;
599 if (x86_lduw_phys(cs
, addr
+ port
/ 8) & (mask
<< (port
& 7))) {
602 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
),
603 env
->eip
+ next_eip_addend
);
604 cpu_vmexit(env
, SVM_EXIT_IOIO
, param
| (port
<< 16), GETPC());
609 void cpu_vmexit(CPUX86State
*env
, uint32_t exit_code
, uint64_t exit_info_1
,
612 CPUState
*cs
= env_cpu(env
);
614 cpu_restore_state(cs
, retaddr
, true);
616 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmexit(%08x, %016" PRIx64
", %016"
617 PRIx64
", " TARGET_FMT_lx
")!\n",
618 exit_code
, exit_info_1
,
619 x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
620 control
.exit_info_2
)),
623 cs
->exception_index
= EXCP_VMEXIT
+ exit_code
;
624 env
->error_code
= exit_info_1
;
626 /* remove any pending exception */
627 env
->old_exception
= -1;
631 void do_vmexit(CPUX86State
*env
, uint32_t exit_code
, uint64_t exit_info_1
)
633 CPUState
*cs
= env_cpu(env
);
636 if (env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
638 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
),
639 SVM_INTERRUPT_SHADOW_MASK
);
640 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
643 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);
645 env
->hflags2
&= ~HF2_NPT_MASK
;
647 /* Save the VM state in the vmcb */
648 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
650 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
652 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
654 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
657 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
),
659 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
),
662 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
),
664 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
),
668 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
670 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
672 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
674 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
676 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
678 int_ctl
= x86_ldl_phys(cs
,
679 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
680 int_ctl
&= ~(V_TPR_MASK
| V_IRQ_MASK
);
681 int_ctl
|= env
->v_tpr
& V_TPR_MASK
;
682 if (cs
->interrupt_request
& CPU_INTERRUPT_VIRQ
) {
683 int_ctl
|= V_IRQ_MASK
;
686 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
), int_ctl
);
688 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
),
689 cpu_compute_eflags(env
));
690 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
),
693 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
), env
->regs
[R_ESP
]);
695 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
), env
->regs
[R_EAX
]);
697 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
699 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
700 x86_stb_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
),
701 env
->hflags
& HF_CPL_MASK
);
703 /* Reload the host state from vm_hsave */
704 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
705 env
->hflags
&= ~HF_GUEST_MASK
;
707 env
->intercept_exceptions
= 0;
708 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
711 env
->gdt
.base
= x86_ldq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
713 env
->gdt
.limit
= x86_ldl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
716 env
->idt
.base
= x86_ldq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
718 env
->idt
.limit
= x86_ldl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
721 cpu_x86_update_cr0(env
, x86_ldq_phys(cs
,
722 env
->vm_hsave
+ offsetof(struct vmcb
,
725 cpu_x86_update_cr4(env
, x86_ldq_phys(cs
,
726 env
->vm_hsave
+ offsetof(struct vmcb
,
728 cpu_x86_update_cr3(env
, x86_ldq_phys(cs
,
729 env
->vm_hsave
+ offsetof(struct vmcb
,
731 /* we need to set the efer after the crs so the hidden flags get
733 cpu_load_efer(env
, x86_ldq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
736 cpu_load_eflags(env
, x86_ldq_phys(cs
,
737 env
->vm_hsave
+ offsetof(struct vmcb
,
739 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
|
742 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
744 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
746 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
748 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
751 env
->eip
= x86_ldq_phys(cs
,
752 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
));
753 env
->regs
[R_ESP
] = x86_ldq_phys(cs
, env
->vm_hsave
+
754 offsetof(struct vmcb
, save
.rsp
));
755 env
->regs
[R_EAX
] = x86_ldq_phys(cs
, env
->vm_hsave
+
756 offsetof(struct vmcb
, save
.rax
));
758 env
->dr
[6] = x86_ldq_phys(cs
,
759 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
));
760 env
->dr
[7] = x86_ldq_phys(cs
,
761 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
));
764 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code
),
766 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_1
),
770 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_int_info
),
771 x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
772 control
.event_inj
)));
774 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_int_info_err
),
775 x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
776 control
.event_inj_err
)));
778 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), 0);
780 env
->hflags2
&= ~HF2_GIF_MASK
;
781 /* FIXME: Resets the current ASID register to zero (host ASID). */
783 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
785 /* Clears the TSC_OFFSET inside the processor. */
787 /* If the host is in PAE mode, the processor reloads the host's PDPEs
788 from the page table indicated the host's CR3. If the PDPEs contain
789 illegal state, the processor causes a shutdown. */
791 /* Disables all breakpoints in the host DR7 register. */
793 /* Checks the reloaded host state for consistency. */
795 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
796 host's code segment or non-canonical (in the case of long mode), a
797 #GP fault is delivered inside the host. */