/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86_descr.h"
#include "x86_decode.h"

#include "hw/i386/apic_internal.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
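/*
 * hvf_set_segment() and hvf_get_segment() translate between QEMU's
 * SegmentCache representation and the VMX segment descriptor layout
 * (selector, base, limit and the packed access-rights field).
 */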
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
                     SegmentCache *qseg, bool is_tr)
{
    vmx_seg->sel = qseg->selector;
    vmx_seg->base = qseg->base;
    vmx_seg->limit = qseg->limit;

    if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {
        /* the TR register is usable after processor reset despite
         * having a null selector */
        vmx_seg->ar = 1 << 16;
        return;
    }
    vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;
    vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;
    vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;
    vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;
    vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;
    vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;
    vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;
    vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;
}
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
{
    qseg->limit = vmx_seg->limit;
    qseg->base = vmx_seg->base;
    qseg->selector = vmx_seg->sel;
    qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) |
                  (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) |
                  (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) |
                  (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) |
                  (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) |
                  (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) |
                  (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) |
                  (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);
}
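/* Copy QEMU's XSAVE area into the vCPU's floating point/SIMD state. */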
void hvf_put_xsave(CPUState *cpu_state)
{
    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.xsave_buf;

    x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);

    if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
        abort();
    }
}
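/*
 * Push the descriptor tables, control registers and segment registers
 * from CPUX86State into the VMCS guest-state fields, then flush the
 * cached vCPU state.
 */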
void hvf_put_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    struct vmx_segment seg;

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);

    /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
    vmx_update_tpr(cpu_state);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);

    macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
    macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_CS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_DS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_ES);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_SS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_FS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_GS);

    hvf_set_segment(cpu_state, &seg, &env->tr, true);
    vmx_write_segment_descriptor(cpu_state, &seg, R_TR);

    hvf_set_segment(cpu_state, &seg, &env->ldt, false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);

    hv_vcpu_flush(cpu_state->hvf_fd);
}
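/* Write the syscall/sysenter and segment-base MSRs into the vCPU. */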
void hvf_put_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
                      env->sysenter_cs);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
                      env->sysenter_esp);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
                      env->sysenter_eip);

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);
}
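/* Read the vCPU's floating point/SIMD state back into QEMU's XSAVE area. */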
void hvf_get_xsave(CPUState *cpu_state)
{
    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.xsave_buf;

    if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
        abort();
    }

    x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave);
}
void hvf_get_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    struct vmx_segment seg;

    env->interrupt_injected = -1;

    vmx_read_segment_descriptor(cpu_state, &seg, R_CS);
    hvf_get_segment(&env->segs[R_CS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_DS);
    hvf_get_segment(&env->segs[R_DS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_ES);
    hvf_get_segment(&env->segs[R_ES], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_FS);
    hvf_get_segment(&env->segs[R_FS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_GS);
    hvf_get_segment(&env->segs[R_GS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_SS);
    hvf_get_segment(&env->segs[R_SS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_TR);
    hvf_get_segment(&env->tr, &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
    hvf_get_segment(&env->ldt, &seg);

    env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
    env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
    env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);

    env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
    env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
    env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);

    env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
}
void hvf_get_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    uint64_t tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
    env->sysenter_cs = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
    env->sysenter_esp = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
    env->sysenter_eip = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);

    env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
}
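/*
 * Full state sync into the vCPU: general purpose registers, flags,
 * XSAVE area, segments, MSRs and debug registers.
 */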
int hvf_put_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
    wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
    wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
    wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
    wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
    wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
    wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
    wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
    wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
    wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
    wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
    wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
    wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
    wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);

    wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);

    hvf_put_xsave(cpu_state);

    hvf_put_segments(cpu_state);

    hvf_put_msrs(cpu_state);

    wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
    wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
    wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
    wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
    wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
    wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
    wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
    wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);

    return 0;
}
int hvf_get_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
    env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
    env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
    env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
    env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
    env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
    env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
    env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
    env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
    env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
    env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
    env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
    env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
    env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
    env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
    env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);

    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
    env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);

    hvf_get_xsave(cpu_state);
    env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);

    hvf_get_segments(cpu_state);
    hvf_get_msrs(cpu_state);

    env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
    env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
    env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
    env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
    env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
    env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
    env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
    env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);

    x86_update_hflags(env);
    return 0;
}
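/*
 * Interrupt-window exiting makes the guest exit as soon as it can accept
 * an external interrupt; hvf_inject_interrupts() below requests it when an
 * interrupt is pending but cannot be injected yet.
 */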
static void vmx_set_int_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}
void vmx_clear_int_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}
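/*
 * Re-inject any event that was pending when the guest last exited
 * (interrupt, exception or NMI), then queue newly requested NMIs and
 * external interrupts through the VM-entry interruption-information
 * field, falling back to NMI-/interrupt-window exiting when they cannot
 * be delivered yet.  Returns whether an INIT or TPR-access request
 * remains pending.
 */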
bool hvf_inject_interrupts(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    uint8_t vector;
    uint64_t intr_type;
    bool have_event = true;
    if (env->interrupt_injected != -1) {
        vector = env->interrupt_injected;
        if (env->ins_len) {
            intr_type = VMCS_INTR_T_SWINTR;
        } else {
            intr_type = VMCS_INTR_T_HWINTR;
        }
    } else if (env->exception_nr != -1) {
        vector = env->exception_nr;
        if (vector == EXCP03_INT3 || vector == EXCP04_INTO) {
            intr_type = VMCS_INTR_T_SWEXCEPTION;
        } else {
            intr_type = VMCS_INTR_T_HWEXCEPTION;
        }
    } else if (env->nmi_injected) {
        vector = EXCP02_NMI;
        intr_type = VMCS_INTR_T_NMI;
    } else {
        have_event = false;
    }

    uint64_t info = 0;
    if (have_event) {
        info = vector | intr_type | VMCS_INTR_VALID;
        uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);
        if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
            vmx_clear_nmi_blocking(cpu_state);
        }

        if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) {
            info &= ~(1 << 12); /* clear undefined bit */
            if (intr_type == VMCS_INTR_T_SWINTR ||
                intr_type == VMCS_INTR_T_SWEXCEPTION) {
                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
            }

            if (env->has_error_code) {
                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,
                      env->error_code);
                /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */
                info |= VMCS_INTR_DEL_ERRCODE;
            }
            /*printf("reinject %lx err %d\n", info, err);*/
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
        }
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
        if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
            cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
            info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
        } else {
            vmx_set_nmi_window_exiting(cpu_state);
        }
    }

    if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
        (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
        int line = cpu_get_pic_interrupt(&x86cpu->env);
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
        if (line >= 0) {
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
                  VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
        }
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
        vmx_set_int_window_exiting(cpu_state);
    }
    return (cpu_state->interrupt_request
            & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
}
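/*
 * Handle interrupt_request bits that need QEMU-side processing: INIT,
 * APIC poll, SIPI and TPR access reporting, and clear the halted state
 * when a hard interrupt or NMI can wake the vCPU.  Returns the current
 * halted flag.
 */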
int hvf_process_events(CPUState *cpu_state)
{
    X86CPU *cpu = X86_CPU(cpu_state);
    CPUX86State *env = &cpu->env;

    EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);

    if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_init(cpu);
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (EFLAGS(env) & IF_MASK)) ||
        (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu_state->halted = 0;
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_sipi(cpu);
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
        hvf_cpu_synchronize_state(cpu_state);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }
    return cpu_state->halted;
}