/*
 * qemu/kvm integration, x86 specific code
 *
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
#include "config-host.h"

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <sys/io.h>       /* ioperm() */
#include <sys/utsname.h>

#include <linux/kvm_para.h>
22 #define MSR_IA32_TSC 0x10
24 static struct kvm_msr_list
*kvm_msr_list
;
25 extern unsigned int kvm_shadow_memory
;
26 extern kvm_context_t kvm_context
;
27 static int kvm_has_msr_star
;
29 static int lm_capable_kernel
;
31 int kvm_qemu_create_memory_alias(uint64_t phys_start
,
35 return kvm_create_memory_alias(kvm_context
, phys_start
, len
, target_phys
);
38 int kvm_qemu_destroy_memory_alias(uint64_t phys_start
)
40 return kvm_destroy_memory_alias(kvm_context
, phys_start
);
43 int kvm_arch_qemu_create_context(void)
46 struct utsname utsname
;
49 lm_capable_kernel
= strcmp(utsname
.machine
, "x86_64") == 0;
51 if (kvm_shadow_memory
)
52 kvm_set_shadow_pages(kvm_context
, kvm_shadow_memory
);
54 kvm_msr_list
= kvm_get_msr_list(kvm_context
);
57 for (i
= 0; i
< kvm_msr_list
->nmsrs
; ++i
)
58 if (kvm_msr_list
->indices
[i
] == MSR_STAR
)
63 static void set_msr_entry(struct kvm_msr_entry
*entry
, uint32_t index
,
70 /* returns 0 on success, non-0 on failure */
71 static int get_msr_entry(struct kvm_msr_entry
*entry
, CPUState
*env
)
73 switch (entry
->index
) {
74 case MSR_IA32_SYSENTER_CS
:
75 env
->sysenter_cs
= entry
->data
;
77 case MSR_IA32_SYSENTER_ESP
:
78 env
->sysenter_esp
= entry
->data
;
80 case MSR_IA32_SYSENTER_EIP
:
81 env
->sysenter_eip
= entry
->data
;
84 env
->star
= entry
->data
;
88 env
->cstar
= entry
->data
;
90 case MSR_KERNELGSBASE
:
91 env
->kernelgsbase
= entry
->data
;
94 env
->fmask
= entry
->data
;
97 env
->lstar
= entry
->data
;
101 env
->tsc
= entry
->data
;
104 printf("Warning unknown msr index 0x%x\n", entry
->index
);
116 static void set_v8086_seg(struct kvm_segment
*lhs
, const SegmentCache
*rhs
)
118 lhs
->selector
= rhs
->selector
;
119 lhs
->base
= rhs
->base
;
120 lhs
->limit
= rhs
->limit
;
132 static void set_seg(struct kvm_segment
*lhs
, const SegmentCache
*rhs
)
134 unsigned flags
= rhs
->flags
;
135 lhs
->selector
= rhs
->selector
;
136 lhs
->base
= rhs
->base
;
137 lhs
->limit
= rhs
->limit
;
138 lhs
->type
= (flags
>> DESC_TYPE_SHIFT
) & 15;
139 lhs
->present
= (flags
& DESC_P_MASK
) != 0;
140 lhs
->dpl
= rhs
->selector
& 3;
141 lhs
->db
= (flags
>> DESC_B_SHIFT
) & 1;
142 lhs
->s
= (flags
& DESC_S_MASK
) != 0;
143 lhs
->l
= (flags
>> DESC_L_SHIFT
) & 1;
144 lhs
->g
= (flags
& DESC_G_MASK
) != 0;
145 lhs
->avl
= (flags
& DESC_AVL_MASK
) != 0;
149 static void get_seg(SegmentCache
*lhs
, const struct kvm_segment
*rhs
)
151 lhs
->selector
= rhs
->selector
;
152 lhs
->base
= rhs
->base
;
153 lhs
->limit
= rhs
->limit
;
155 (rhs
->type
<< DESC_TYPE_SHIFT
)
156 | (rhs
->present
* DESC_P_MASK
)
157 | (rhs
->dpl
<< DESC_DPL_SHIFT
)
158 | (rhs
->db
<< DESC_B_SHIFT
)
159 | (rhs
->s
* DESC_S_MASK
)
160 | (rhs
->l
<< DESC_L_SHIFT
)
161 | (rhs
->g
* DESC_G_MASK
)
162 | (rhs
->avl
* DESC_AVL_MASK
);
165 void kvm_arch_load_regs(CPUState
*env
)
167 struct kvm_regs regs
;
169 struct kvm_sregs sregs
;
170 struct kvm_msr_entry msrs
[MSR_COUNT
];
173 regs
.rax
= env
->regs
[R_EAX
];
174 regs
.rbx
= env
->regs
[R_EBX
];
175 regs
.rcx
= env
->regs
[R_ECX
];
176 regs
.rdx
= env
->regs
[R_EDX
];
177 regs
.rsi
= env
->regs
[R_ESI
];
178 regs
.rdi
= env
->regs
[R_EDI
];
179 regs
.rsp
= env
->regs
[R_ESP
];
180 regs
.rbp
= env
->regs
[R_EBP
];
182 regs
.r8
= env
->regs
[8];
183 regs
.r9
= env
->regs
[9];
184 regs
.r10
= env
->regs
[10];
185 regs
.r11
= env
->regs
[11];
186 regs
.r12
= env
->regs
[12];
187 regs
.r13
= env
->regs
[13];
188 regs
.r14
= env
->regs
[14];
189 regs
.r15
= env
->regs
[15];
192 regs
.rflags
= env
->eflags
;
195 kvm_set_regs(kvm_context
, env
->cpu_index
, ®s
);
197 memset(&fpu
, 0, sizeof fpu
);
198 fpu
.fsw
= env
->fpus
& ~(7 << 11);
199 fpu
.fsw
|= (env
->fpstt
& 7) << 11;
201 for (i
= 0; i
< 8; ++i
)
202 fpu
.ftwx
|= (!env
->fptags
[i
]) << i
;
203 memcpy(fpu
.fpr
, env
->fpregs
, sizeof env
->fpregs
);
204 memcpy(fpu
.xmm
, env
->xmm_regs
, sizeof env
->xmm_regs
);
205 fpu
.mxcsr
= env
->mxcsr
;
206 kvm_set_fpu(kvm_context
, env
->cpu_index
, &fpu
);
208 memcpy(sregs
.interrupt_bitmap
, env
->interrupt_bitmap
, sizeof(sregs
.interrupt_bitmap
));
210 if ((env
->eflags
& VM_MASK
)) {
211 set_v8086_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
212 set_v8086_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
213 set_v8086_seg(&sregs
.es
, &env
->segs
[R_ES
]);
214 set_v8086_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
215 set_v8086_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
216 set_v8086_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
218 set_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
219 set_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
220 set_seg(&sregs
.es
, &env
->segs
[R_ES
]);
221 set_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
222 set_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
223 set_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
225 if (env
->cr
[0] & CR0_PE_MASK
) {
226 /* force ss cpl to cs cpl */
227 sregs
.ss
.selector
= (sregs
.ss
.selector
& ~3) |
228 (sregs
.cs
.selector
& 3);
229 sregs
.ss
.dpl
= sregs
.ss
.selector
& 3;
233 set_seg(&sregs
.tr
, &env
->tr
);
234 set_seg(&sregs
.ldt
, &env
->ldt
);
236 sregs
.idt
.limit
= env
->idt
.limit
;
237 sregs
.idt
.base
= env
->idt
.base
;
238 sregs
.gdt
.limit
= env
->gdt
.limit
;
239 sregs
.gdt
.base
= env
->gdt
.base
;
241 sregs
.cr0
= env
->cr
[0];
242 sregs
.cr2
= env
->cr
[2];
243 sregs
.cr3
= env
->cr
[3];
244 sregs
.cr4
= env
->cr
[4];
246 sregs
.cr8
= cpu_get_apic_tpr(env
);
247 sregs
.apic_base
= cpu_get_apic_base(env
);
249 sregs
.efer
= env
->efer
;
251 kvm_set_sregs(kvm_context
, env
->cpu_index
, &sregs
);
255 set_msr_entry(&msrs
[n
++], MSR_IA32_SYSENTER_CS
, env
->sysenter_cs
);
256 set_msr_entry(&msrs
[n
++], MSR_IA32_SYSENTER_ESP
, env
->sysenter_esp
);
257 set_msr_entry(&msrs
[n
++], MSR_IA32_SYSENTER_EIP
, env
->sysenter_eip
);
258 if (kvm_has_msr_star
)
259 set_msr_entry(&msrs
[n
++], MSR_STAR
, env
->star
);
260 set_msr_entry(&msrs
[n
++], MSR_IA32_TSC
, env
->tsc
);
262 if (lm_capable_kernel
) {
263 set_msr_entry(&msrs
[n
++], MSR_CSTAR
, env
->cstar
);
264 set_msr_entry(&msrs
[n
++], MSR_KERNELGSBASE
, env
->kernelgsbase
);
265 set_msr_entry(&msrs
[n
++], MSR_FMASK
, env
->fmask
);
266 set_msr_entry(&msrs
[n
++], MSR_LSTAR
, env
->lstar
);
270 rc
= kvm_set_msrs(kvm_context
, env
->cpu_index
, msrs
, n
);
272 perror("kvm_set_msrs FAILED");
275 void kvm_save_mpstate(CPUState
*env
)
277 #ifdef KVM_CAP_MP_STATE
279 struct kvm_mp_state mp_state
;
281 r
= kvm_get_mpstate(kvm_context
, env
->cpu_index
, &mp_state
);
285 env
->mp_state
= mp_state
.mp_state
;
289 void kvm_load_mpstate(CPUState
*env
)
291 #ifdef KVM_CAP_MP_STATE
292 struct kvm_mp_state mp_state
= { .mp_state
= env
->mp_state
};
295 * -1 indicates that the host did not support GET_MP_STATE ioctl,
298 if (env
->mp_state
!= -1)
299 kvm_set_mpstate(kvm_context
, env
->cpu_index
, &mp_state
);
303 void kvm_arch_save_regs(CPUState
*env
)
305 struct kvm_regs regs
;
307 struct kvm_sregs sregs
;
308 struct kvm_msr_entry msrs
[MSR_COUNT
];
312 kvm_get_regs(kvm_context
, env
->cpu_index
, ®s
);
314 env
->regs
[R_EAX
] = regs
.rax
;
315 env
->regs
[R_EBX
] = regs
.rbx
;
316 env
->regs
[R_ECX
] = regs
.rcx
;
317 env
->regs
[R_EDX
] = regs
.rdx
;
318 env
->regs
[R_ESI
] = regs
.rsi
;
319 env
->regs
[R_EDI
] = regs
.rdi
;
320 env
->regs
[R_ESP
] = regs
.rsp
;
321 env
->regs
[R_EBP
] = regs
.rbp
;
323 env
->regs
[8] = regs
.r8
;
324 env
->regs
[9] = regs
.r9
;
325 env
->regs
[10] = regs
.r10
;
326 env
->regs
[11] = regs
.r11
;
327 env
->regs
[12] = regs
.r12
;
328 env
->regs
[13] = regs
.r13
;
329 env
->regs
[14] = regs
.r14
;
330 env
->regs
[15] = regs
.r15
;
333 env
->eflags
= regs
.rflags
;
336 kvm_get_fpu(kvm_context
, env
->cpu_index
, &fpu
);
337 env
->fpstt
= (fpu
.fsw
>> 11) & 7;
340 for (i
= 0; i
< 8; ++i
)
341 env
->fptags
[i
] = !((fpu
.ftwx
>> i
) & 1);
342 memcpy(env
->fpregs
, fpu
.fpr
, sizeof env
->fpregs
);
343 memcpy(env
->xmm_regs
, fpu
.xmm
, sizeof env
->xmm_regs
);
344 env
->mxcsr
= fpu
.mxcsr
;
346 kvm_get_sregs(kvm_context
, env
->cpu_index
, &sregs
);
348 memcpy(env
->interrupt_bitmap
, sregs
.interrupt_bitmap
, sizeof(env
->interrupt_bitmap
));
350 get_seg(&env
->segs
[R_CS
], &sregs
.cs
);
351 get_seg(&env
->segs
[R_DS
], &sregs
.ds
);
352 get_seg(&env
->segs
[R_ES
], &sregs
.es
);
353 get_seg(&env
->segs
[R_FS
], &sregs
.fs
);
354 get_seg(&env
->segs
[R_GS
], &sregs
.gs
);
355 get_seg(&env
->segs
[R_SS
], &sregs
.ss
);
357 get_seg(&env
->tr
, &sregs
.tr
);
358 get_seg(&env
->ldt
, &sregs
.ldt
);
360 env
->idt
.limit
= sregs
.idt
.limit
;
361 env
->idt
.base
= sregs
.idt
.base
;
362 env
->gdt
.limit
= sregs
.gdt
.limit
;
363 env
->gdt
.base
= sregs
.gdt
.base
;
365 env
->cr
[0] = sregs
.cr0
;
366 env
->cr
[2] = sregs
.cr2
;
367 env
->cr
[3] = sregs
.cr3
;
368 env
->cr
[4] = sregs
.cr4
;
370 cpu_set_apic_base(env
, sregs
.apic_base
);
372 env
->efer
= sregs
.efer
;
373 //cpu_set_apic_tpr(env, sregs.cr8);
375 #define HFLAG_COPY_MASK ~( \
376 HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
377 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
378 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
379 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
383 hflags
= (env
->segs
[R_CS
].flags
>> DESC_DPL_SHIFT
) & HF_CPL_MASK
;
384 hflags
|= (env
->cr
[0] & CR0_PE_MASK
) << (HF_PE_SHIFT
- CR0_PE_SHIFT
);
385 hflags
|= (env
->cr
[0] << (HF_MP_SHIFT
- CR0_MP_SHIFT
)) &
386 (HF_MP_MASK
| HF_EM_MASK
| HF_TS_MASK
);
387 hflags
|= (env
->eflags
& (HF_TF_MASK
| HF_VM_MASK
| HF_IOPL_MASK
));
388 hflags
|= (env
->cr
[4] & CR4_OSFXSR_MASK
) <<
389 (HF_OSFXSR_SHIFT
- CR4_OSFXSR_SHIFT
);
391 if (env
->efer
& MSR_EFER_LMA
) {
392 hflags
|= HF_LMA_MASK
;
395 if ((hflags
& HF_LMA_MASK
) && (env
->segs
[R_CS
].flags
& DESC_L_MASK
)) {
396 hflags
|= HF_CS32_MASK
| HF_SS32_MASK
| HF_CS64_MASK
;
398 hflags
|= (env
->segs
[R_CS
].flags
& DESC_B_MASK
) >>
399 (DESC_B_SHIFT
- HF_CS32_SHIFT
);
400 hflags
|= (env
->segs
[R_SS
].flags
& DESC_B_MASK
) >>
401 (DESC_B_SHIFT
- HF_SS32_SHIFT
);
402 if (!(env
->cr
[0] & CR0_PE_MASK
) ||
403 (env
->eflags
& VM_MASK
) ||
404 !(hflags
& HF_CS32_MASK
)) {
405 hflags
|= HF_ADDSEG_MASK
;
407 hflags
|= ((env
->segs
[R_DS
].base
|
408 env
->segs
[R_ES
].base
|
409 env
->segs
[R_SS
].base
) != 0) <<
413 env
->hflags
= (env
->hflags
& HFLAG_COPY_MASK
) | hflags
;
414 env
->cc_src
= env
->eflags
& (CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
415 env
->df
= 1 - (2 * ((env
->eflags
>> 10) & 1));
416 env
->cc_op
= CC_OP_EFLAGS
;
417 env
->eflags
&= ~(DF_MASK
| CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
421 msrs
[n
++].index
= MSR_IA32_SYSENTER_CS
;
422 msrs
[n
++].index
= MSR_IA32_SYSENTER_ESP
;
423 msrs
[n
++].index
= MSR_IA32_SYSENTER_EIP
;
424 if (kvm_has_msr_star
)
425 msrs
[n
++].index
= MSR_STAR
;
426 msrs
[n
++].index
= MSR_IA32_TSC
;
428 if (lm_capable_kernel
) {
429 msrs
[n
++].index
= MSR_CSTAR
;
430 msrs
[n
++].index
= MSR_KERNELGSBASE
;
431 msrs
[n
++].index
= MSR_FMASK
;
432 msrs
[n
++].index
= MSR_LSTAR
;
435 rc
= kvm_get_msrs(kvm_context
, env
->cpu_index
, msrs
, n
);
437 perror("kvm_get_msrs FAILED");
440 n
= rc
; /* actual number of MSRs */
441 for (i
=0 ; i
<n
; i
++) {
442 if (get_msr_entry(&msrs
[i
], env
))
448 static void host_cpuid(uint32_t function
, uint32_t *eax
, uint32_t *ebx
,
449 uint32_t *ecx
, uint32_t *edx
)
455 : "=a"(vec
[0]), "=b"(vec
[1]),
456 "=c"(vec
[2]), "=d"(vec
[3])
457 : "0"(function
) : "cc");
459 asm volatile("pusha \n\t"
461 "mov %%eax, 0(%1) \n\t"
462 "mov %%ebx, 4(%1) \n\t"
463 "mov %%ecx, 8(%1) \n\t"
464 "mov %%edx, 12(%1) \n\t"
466 : : "a"(function
), "S"(vec
)
481 static void do_cpuid_ent(struct kvm_cpuid_entry
*e
, uint32_t function
,
484 env
->regs
[R_EAX
] = function
;
485 qemu_kvm_cpuid_on_env(env
);
486 e
->function
= function
;
487 e
->eax
= env
->regs
[R_EAX
];
488 e
->ebx
= env
->regs
[R_EBX
];
489 e
->ecx
= env
->regs
[R_ECX
];
490 e
->edx
= env
->regs
[R_EDX
];
491 if (function
== 0x80000001) {
492 uint32_t h_eax
, h_edx
;
494 host_cpuid(function
, &h_eax
, NULL
, NULL
, &h_edx
);
497 if ((h_edx
& 0x20000000) == 0 || !lm_capable_kernel
)
498 e
->edx
&= ~0x20000000u
;
500 if ((h_edx
& 0x00000800) == 0)
501 e
->edx
&= ~0x00000800u
;
503 if ((h_edx
& 0x00100000) == 0)
504 e
->edx
&= ~0x00100000u
;
509 // sysenter isn't supported on compatibility mode on AMD. and syscall
510 // isn't supported in compatibility mode on Intel. so advertise the
511 // actuall cpu, and say goodbye to migration between different vendors
512 // is you use compatibility mode.
516 host_cpuid(0, NULL
, &bcd
[0], &bcd
[1], &bcd
[2]);
521 // "Hypervisor present" bit for Microsoft guests
523 e
->ecx
|= (1u << 31);
525 // 3dnow isn't properly emulated yet
526 if (function
== 0x80000001)
527 e
->edx
&= ~0xc0000000;
530 struct kvm_para_features
{
533 } para_features
[] = {
534 #ifdef KVM_CAP_CLOCKSOURCE
535 { KVM_CAP_CLOCKSOURCE
, KVM_FEATURE_CLOCKSOURCE
},
537 #ifdef KVM_CAP_NOP_IO_DELAY
538 { KVM_CAP_NOP_IO_DELAY
, KVM_FEATURE_NOP_IO_DELAY
},
540 #ifdef KVM_CAP_PV_MMU
541 { KVM_CAP_PV_MMU
, KVM_FEATURE_MMU_OP
},
543 #ifdef KVM_CAP_CR3_CACHE
544 { KVM_CAP_CR3_CACHE
, KVM_FEATURE_CR3_CACHE
},
549 static int get_para_features(kvm_context_t kvm_context
)
553 for (i
= 0; i
< ARRAY_SIZE(para_features
)-1; i
++) {
554 if (kvm_check_extension(kvm_context
, para_features
[i
].cap
))
555 features
|= (1 << para_features
[i
].feature
);
561 int kvm_arch_qemu_init_env(CPUState
*cenv
)
563 struct kvm_cpuid_entry cpuid_ent
[100];
564 #ifdef KVM_CPUID_SIGNATURE
565 struct kvm_cpuid_entry
*pv_ent
;
566 uint32_t signature
[3];
574 #ifdef KVM_CPUID_SIGNATURE
575 /* Paravirtualization CPUIDs */
576 memcpy(signature
, "KVMKVMKVM", 12);
577 pv_ent
= &cpuid_ent
[cpuid_nent
++];
578 memset(pv_ent
, 0, sizeof(*pv_ent
));
579 pv_ent
->function
= KVM_CPUID_SIGNATURE
;
581 pv_ent
->ebx
= signature
[0];
582 pv_ent
->ecx
= signature
[1];
583 pv_ent
->edx
= signature
[2];
585 pv_ent
= &cpuid_ent
[cpuid_nent
++];
586 memset(pv_ent
, 0, sizeof(*pv_ent
));
587 pv_ent
->function
= KVM_CPUID_FEATURES
;
588 pv_ent
->eax
= get_para_features(kvm_context
);
591 copy
.regs
[R_EAX
] = 0;
592 qemu_kvm_cpuid_on_env(©
);
593 limit
= copy
.regs
[R_EAX
];
595 for (i
= 0; i
<= limit
; ++i
)
596 do_cpuid_ent(&cpuid_ent
[cpuid_nent
++], i
, ©
);
598 copy
.regs
[R_EAX
] = 0x80000000;
599 qemu_kvm_cpuid_on_env(©
);
600 limit
= copy
.regs
[R_EAX
];
602 for (i
= 0x80000000; i
<= limit
; ++i
)
603 do_cpuid_ent(&cpuid_ent
[cpuid_nent
++], i
, ©
);
605 kvm_setup_cpuid(kvm_context
, cenv
->cpu_index
, cpuid_nent
, cpuid_ent
);
609 int kvm_arch_halt(void *opaque
, int vcpu
)
611 CPUState
*env
= cpu_single_env
;
613 if (!((env
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
614 (env
->eflags
& IF_MASK
)) &&
615 !(env
->interrupt_request
& CPU_INTERRUPT_NMI
)) {
617 env
->exception_index
= EXCP_HLT
;
622 void kvm_arch_pre_kvm_run(void *opaque
, CPUState
*env
)
624 if (!kvm_irqchip_in_kernel(kvm_context
))
625 kvm_set_cr8(kvm_context
, env
->cpu_index
, cpu_get_apic_tpr(env
));
628 void kvm_arch_post_kvm_run(void *opaque
, CPUState
*env
)
630 int vcpu
= env
->cpu_index
;
632 cpu_single_env
= env
;
634 env
->eflags
= kvm_get_interrupt_flag(kvm_context
, vcpu
)
635 ? env
->eflags
| IF_MASK
: env
->eflags
& ~IF_MASK
;
637 cpu_set_apic_tpr(env
, kvm_get_cr8(kvm_context
, vcpu
));
638 cpu_set_apic_base(env
, kvm_get_apic_base(kvm_context
, vcpu
));
641 int kvm_arch_has_work(CPUState
*env
)
643 if (((env
->interrupt_request
& (CPU_INTERRUPT_HARD
| CPU_INTERRUPT_EXIT
)) &&
644 (env
->eflags
& IF_MASK
)) ||
645 (env
->interrupt_request
& CPU_INTERRUPT_NMI
))
650 int kvm_arch_try_push_interrupts(void *opaque
)
652 CPUState
*env
= cpu_single_env
;
655 if (kvm_is_ready_for_interrupt_injection(kvm_context
, env
->cpu_index
) &&
656 (env
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
657 (env
->eflags
& IF_MASK
)) {
658 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
659 irq
= cpu_get_pic_interrupt(env
);
661 r
= kvm_inject_irq(kvm_context
, env
->cpu_index
, irq
);
663 printf("cpu %d fail inject %x\n", env
->cpu_index
, irq
);
667 return (env
->interrupt_request
& CPU_INTERRUPT_HARD
) != 0;
670 int kvm_arch_try_push_nmi(void *opaque
)
672 CPUState
*env
= cpu_single_env
;
675 if (likely(!(env
->interrupt_request
& CPU_INTERRUPT_NMI
)))
678 if (kvm_is_ready_for_nmi_injection(kvm_context
, env
->cpu_index
)) {
679 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
680 r
= kvm_inject_nmi(kvm_context
, env
->cpu_index
);
682 printf("cpu %d fail inject NMI\n", env
->cpu_index
);
685 return (env
->interrupt_request
& CPU_INTERRUPT_NMI
) != 0;
688 void kvm_arch_update_regs_for_sipi(CPUState
*env
)
690 SegmentCache cs
= env
->segs
[R_CS
];
692 kvm_arch_save_regs(env
);
693 env
->segs
[R_CS
] = cs
;
695 kvm_arch_load_regs(env
);
698 int handle_tpr_access(void *opaque
, int vcpu
,
699 uint64_t rip
, int is_write
)
701 kvm_tpr_access_report(cpu_single_env
, rip
, is_write
);
705 void kvm_arch_cpu_reset(CPUState
*env
)
707 kvm_arch_load_regs(env
);
708 if (env
->cpu_index
!= 0) {
709 if (kvm_irqchip_in_kernel(kvm_context
)) {
710 #ifdef KVM_CAP_MP_STATE
711 kvm_reset_mpstate(kvm_context
, env
->cpu_index
);
714 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
716 env
->exception_index
= EXCP_HLT
;
721 void kvm_arch_do_ioperm(void *_data
)
723 struct ioperm_data
*data
= _data
;
724 ioperm(data
->start_port
, data
->num
, data
->turn_on
);