/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86hvf.h"
#include "vmx.h"
#include "vmcs.h"
#include "cpu.h"
#include "x86_descr.h"
#include "x86_decode.h"

#include "hw/i386/apic_internal.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
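
/* Convert a QEMU SegmentCache into the VMX segment-descriptor layout,
 * packing the descriptor flags into the VMCS access-rights (AR) field. */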
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
                     SegmentCache *qseg, bool is_tr)
{
    vmx_seg->sel = qseg->selector;
    vmx_seg->base = qseg->base;
    vmx_seg->limit = qseg->limit;

    if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {
        /* the TR register is usable after processor reset despite
         * having a null selector */
        vmx_seg->ar = 1 << 16;
        return;
    }
    vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;
    vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;
    vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;
    vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;
    vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;
    vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;
    vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;
    vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;
}
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
{
    qseg->limit = vmx_seg->limit;
    qseg->base = vmx_seg->base;
    qseg->selector = vmx_seg->sel;
    qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) |
                  (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) |
                  (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) |
                  (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) |
                  (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) |
                  (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) |
                  (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) |
                  (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);
}
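
/* Push QEMU's FPU/SSE/AVX state into the vCPU via the 4 KiB XSAVE area. */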
void hvf_put_xsave(CPUState *cpu_state)
{
    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.xsave_buf;

    x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);

    if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
        abort();
    }
}
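
/* Write the guest's descriptor tables, control registers and all
 * segment registers into the VMCS, then flush the changes to the vCPU. */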
void hvf_put_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    struct vmx_segment seg;

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);

    /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
    vmx_update_tpr(cpu_state);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);

    macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
    macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_CS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_DS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_ES);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_SS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_FS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_GS);

    hvf_set_segment(cpu_state, &seg, &env->tr, true);
    vmx_write_segment_descriptor(cpu_state, &seg, R_TR);

    hvf_set_segment(cpu_state, &seg, &env->ldt, false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);

    hv_vcpu_flush(cpu_state->hvf_fd);
}
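
/* Write the sysenter, syscall and FS/GS base MSRs into the vCPU. */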
void hvf_put_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
                      env->sysenter_cs);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
                      env->sysenter_esp);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
                      env->sysenter_eip);

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);

#ifdef TARGET_X86_64
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
#endif

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);
}
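
/* Read the vCPU's XSAVE area and restore it into QEMU's FPU/SSE/AVX state. */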
void hvf_get_xsave(CPUState *cpu_state)
{
    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.xsave_buf;

    if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
        abort();
    }

    x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave);
}
void hvf_get_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    struct vmx_segment seg;

    env->interrupt_injected = -1;

    vmx_read_segment_descriptor(cpu_state, &seg, R_CS);
    hvf_get_segment(&env->segs[R_CS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_DS);
    hvf_get_segment(&env->segs[R_DS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_ES);
    hvf_get_segment(&env->segs[R_ES], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_FS);
    hvf_get_segment(&env->segs[R_FS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_GS);
    hvf_get_segment(&env->segs[R_GS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_SS);
    hvf_get_segment(&env->segs[R_SS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_TR);
    hvf_get_segment(&env->tr, &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
    hvf_get_segment(&env->ldt, &seg);

    env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
    env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
    env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);

    env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
    env->cr[2] = 0;
    env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
    env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);

    env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
}
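
/* Read the sysenter/syscall MSRs and the guest TSC back from the vCPU. */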
void hvf_get_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    uint64_t tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
    env->sysenter_cs = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
    env->sysenter_esp = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
    env->sysenter_eip = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);

#ifdef TARGET_X86_64
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
#endif

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);

    env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
}
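
/* Copy the full register state (GPRs, flags, RIP, XSAVE, segments, MSRs
 * and debug registers) from QEMU into the HVF vCPU. */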
int hvf_put_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
    wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
    wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
    wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
    wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
    wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
    wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
    wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
    wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
    wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
    wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
    wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
    wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
    wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);

    wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);

    hvf_put_xsave(cpu_state);

    hvf_put_segments(cpu_state);

    hvf_put_msrs(cpu_state);

    wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
    wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
    wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
    wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
    wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
    wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
    wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
    wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);

    return 0;
}
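
/* Copy the full register state back from the HVF vCPU into QEMU, then
 * recompute the hflags derived from it. */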
int hvf_get_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
    env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
    env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
    env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
    env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
    env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
    env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
    env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
    env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
    env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
    env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
    env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
    env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
    env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
    env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
    env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);

    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
    env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);

    hvf_get_xsave(cpu_state);
    env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);

    hvf_get_segments(cpu_state);
    hvf_get_msrs(cpu_state);

    env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
    env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
    env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
    env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
    env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
    env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
    env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
    env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);

    x86_update_hflags(env);
    return 0;
}
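
/* The pair below toggles the "interrupt-window exiting" bit in the primary
 * processor-based VM-execution controls: while set, the guest exits as soon
 * as it becomes able to accept an interrupt. */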
static void vmx_set_int_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}

void vmx_clear_int_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}
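
/* Re-inject any pending event (interrupt, exception or NMI) into the guest,
 * and request interrupt/NMI window exits when injection has to wait.
 * Returns nonzero if an INIT or TPR access still needs to be serviced. */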
bool hvf_inject_interrupts(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    uint8_t vector;
    uint64_t intr_type;
    bool have_event = true;
    if (env->interrupt_injected != -1) {
        vector = env->interrupt_injected;
        if (env->ins_len) {
            intr_type = VMCS_INTR_T_SWINTR;
        } else {
            intr_type = VMCS_INTR_T_HWINTR;
        }
    } else if (env->exception_nr != -1) {
        vector = env->exception_nr;
        if (vector == EXCP03_INT3 || vector == EXCP04_INTO) {
            intr_type = VMCS_INTR_T_SWEXCEPTION;
        } else {
            intr_type = VMCS_INTR_T_HWEXCEPTION;
        }
    } else if (env->nmi_injected) {
        vector = EXCP02_NMI;
        intr_type = VMCS_INTR_T_NMI;
    } else {
        have_event = false;
        vector = 0;
        intr_type = 0;
    }

    uint64_t info = 0;
    if (have_event) {
        info = vector | intr_type | VMCS_INTR_VALID;
        uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);
        if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
            vmx_clear_nmi_blocking(cpu_state);
        }

        if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) {
            info &= ~(1 << 12); /* clear undefined bit */
            if (intr_type == VMCS_INTR_T_SWINTR ||
                intr_type == VMCS_INTR_T_SWEXCEPTION) {
                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
            }

            if (env->has_error_code) {
                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,
                      env->error_code);
                /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */
                info |= VMCS_INTR_DEL_ERRCODE;
            }
            /*printf("reinject %lx err %d\n", info, err);*/
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
        }
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
        if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
            cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
            info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
        } else {
            vmx_set_nmi_window_exiting(cpu_state);
        }
    }

    if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
        (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
        int line = cpu_get_pic_interrupt(&x86cpu->env);
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
        if (line >= 0) {
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
                  VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
        }
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
        vmx_set_int_window_exiting(cpu_state);
    }
    return (cpu_state->interrupt_request
            & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
}
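
/* Service pending interrupt_request bits (INIT, poll, SIPI, TPR) that must
 * be handled outside the vCPU run loop; returns the updated halted state. */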
int hvf_process_events(CPUState *cpu_state)
{
    X86CPU *cpu = X86_CPU(cpu_state);
    CPUX86State *env = &cpu->env;

    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);

    if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_init(cpu);
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) ||
        (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu_state->halted = 0;
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_sipi(cpu);
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
        hvf_cpu_synchronize_state(cpu_state);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }
    return cpu_state->halted;
}