2 * qemu/kvm integration, x86 specific code
4 * Copyright (C) 2006-2008 Qumranet Technologies
6 * Licensed under the terms of the GNU GPL version 2 or higher.
10 #include "config-host.h"
20 #include <sys/utsname.h>
21 #include <linux/kvm_para.h>
22 #include <sys/ioctl.h>
27 #define MSR_IA32_TSC 0x10
/* Cached result of KVM_GET_MSR_INDEX_LIST; filled in kvm_arch_qemu_create_context. */
29 static struct kvm_msr_list
*kvm_msr_list
;
/* Shadow MMU page budget; defined elsewhere (presumably set from the command line — TODO confirm). */
30 extern unsigned int kvm_shadow_memory
;
/* Set if the kernel's MSR list advertises MSR_STAR. */
31 static int kvm_has_msr_star
;
/* Set if the kernel's MSR list advertises MSR_VM_HSAVE_PA (SVM host save area). */
32 static int kvm_has_vm_hsave_pa
;
/* Non-zero when uname() reports an x86_64 host kernel (long-mode capable). */
34 static int lm_capable_kernel
;
/*
 * Tell KVM where to place the 3-page TSS region needed for VMX real-mode
 * emulation, and reserve that range (0x4000 bytes) in the guest e820 map
 * so the BIOS will not hand it to the guest as usable RAM.
 * NOTE(review): fragmentary extract — error-path braces/returns are missing
 * from this view; restore from upstream when re-syncing.
 */
36 int kvm_set_tss_addr(kvm_context_t kvm
, unsigned long addr
)
38 #ifdef KVM_CAP_SET_TSS_ADDR
41 * Tell fw_cfg to notify the BIOS to reserve the range.
43 if (e820_add_entry(addr
/* 0x4000 bytes = 4 pages reserved in the e820 table */
, 0x4000, E820_RESERVED
) < 0) {
44 perror("e820_add_entry() table is full");
/* Probe that the kernel supports KVM_SET_TSS_ADDR before using it. */
48 r
= kvm_ioctl(kvm_state
, KVM_CHECK_EXTENSION
, KVM_CAP_SET_TSS_ADDR
);
50 r
= kvm_vm_ioctl(kvm_state
, KVM_SET_TSS_ADDR
, addr
);
/* %m expands to strerror(errno) (glibc extension). */
52 fprintf(stderr
, "kvm_set_tss_addr: %m\n");
/*
 * If the kernel supports it, place the VMX TSS at the fixed address
 * 0xfeffd000 (just below the BIOS region).
 */
61 static int kvm_init_tss(kvm_context_t kvm
)
63 #ifdef KVM_CAP_SET_TSS_ADDR
66 r
= kvm_ioctl(kvm_state
, KVM_CHECK_EXTENSION
, KVM_CAP_SET_TSS_ADDR
);
69 * this address is 3 pages before the bios, and the bios should present
72 r
= kvm_set_tss_addr(kvm
, 0xfeffd000);
74 fprintf(stderr
, "kvm_init_tss: unable to set tss addr\n");
/*
 * Tell KVM where to put the EPT identity-map page.  Unlike
 * KVM_SET_TSS_ADDR, the ioctl takes the address by pointer.
 */
83 static int kvm_set_identity_map_addr(kvm_context_t kvm
, uint64_t addr
)
85 #ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
88 r
= kvm_ioctl(kvm_state
, KVM_CHECK_EXTENSION
, KVM_CAP_SET_IDENTITY_MAP_ADDR
);
90 r
= kvm_vm_ioctl(kvm_state
, KVM_SET_IDENTITY_MAP_ADDR
, &addr
);
92 fprintf(stderr
, "kvm_set_identity_map_addr: %m\n");
/*
 * Place the EPT identity-map page at the fixed address 0xfeffc000,
 * one page below the TSS region set up by kvm_init_tss().
 */
101 static int kvm_init_identity_map_page(kvm_context_t kvm
)
103 #ifdef KVM_CAP_SET_IDENTITY_MAP_ADDR
106 r
= kvm_ioctl(kvm_state
, KVM_CHECK_EXTENSION
, KVM_CAP_SET_IDENTITY_MAP_ADDR
);
109 * this address is 4 pages before the bios, and the bios should present
110 * as unavaible memory
112 r
= kvm_set_identity_map_addr(kvm
, 0xfeffc000);
114 fprintf(stderr
, "kvm_init_identity_map_page: "
115 "unable to set identity mapping addr\n");
/*
 * Create the in-kernel PIT if the kernel supports it and the user has not
 * disabled it (kvm->no_pit_creation); records the result in
 * kvm_state->pit_in_kernel.
 */
124 static int kvm_create_pit(kvm_context_t kvm
)
/* Default: assume userspace PIT until KVM_CREATE_PIT succeeds. */
129 kvm_state
->pit_in_kernel
= 0;
130 if (!kvm
->no_pit_creation
) {
131 r
= kvm_ioctl(kvm_state
, KVM_CHECK_EXTENSION
, KVM_CAP_PIT
);
133 r
= kvm_vm_ioctl(kvm_state
, KVM_CREATE_PIT
);
135 kvm_state
->pit_in_kernel
= 1;
/* NOTE(review): message says "PIC irqchip" but this path creates the PIT —
 * looks like a copy/paste slip in the original; fix when re-syncing. */
137 fprintf(stderr
, "Create kernel PIC irqchip failed\n");
/*
 * x86 arch hook for VM creation: sets up the TSS region, the EPT
 * identity-map page, the in-kernel PIT and coalesced-MMIO support.
 * Error-check lines between the calls are missing from this extract.
 */
146 int kvm_arch_create(kvm_context_t kvm
, unsigned long phys_mem_bytes
,
151 r
= kvm_init_tss(kvm
);
155 r
= kvm_init_identity_map_page(kvm
);
159 r
= kvm_create_pit(kvm
);
163 r
= kvm_init_coalesced_mmio(kvm
);
170 #ifdef KVM_EXIT_TPR_ACCESS
/*
 * KVM_EXIT_TPR_ACCESS handler: forward the TPR access report (rip and
 * read/write flag from kvm_run->tpr_access) to the TPR optimization code.
 */
172 static int kvm_handle_tpr_access(CPUState
*env
)
174 struct kvm_run
*run
= env
->kvm_run
;
175 kvm_tpr_access_report(env
,
177 run
->tpr_access
.is_write
);
/*
 * Point KVM at the virtual-APIC page (guest physical address `vapic`)
 * via KVM_SET_VAPIC_ADDR; returns the ioctl result.
 */
182 int kvm_enable_vapic(CPUState
*env
, uint64_t vapic
)
184 struct kvm_vapic_addr va
= {
188 return kvm_vcpu_ioctl(env
, KVM_SET_VAPIC_ADDR
, &va
);
/*
 * x86-specific vcpu exit dispatch: handles the exit reasons the generic
 * loop does not (TPR set / TPR access).  Other reasons presumably fall
 * through to a default — the default arm is missing from this extract.
 */
193 int kvm_arch_run(CPUState
*env
)
196 struct kvm_run
*run
= env
->kvm_run
;
199 switch (run
->exit_reason
) {
200 #ifdef KVM_EXIT_SET_TPR
201 case KVM_EXIT_SET_TPR
:
204 #ifdef KVM_EXIT_TPR_ACCESS
205 case KVM_EXIT_TPR_ACCESS
:
206 r
= kvm_handle_tpr_access(env
);
/*
 * Small fixed table tracking up to 4 active memory aliases so that
 * re-aliasing the same start address reuses its slot.  A slot with
 * len == 0 is free.
 */
217 #define MAX_ALIAS_SLOTS 4
221 } kvm_aliases
[MAX_ALIAS_SLOTS
];
/* Find the slot already registered for `start`; fallthrough (not visible
 * here) presumably returns -1 when absent. */
223 static int get_alias_slot(uint64_t start
)
227 for (i
=0; i
<MAX_ALIAS_SLOTS
; i
++)
228 if (kvm_aliases
[i
].start
== start
)
/* Find an unused slot (len == 0). */
232 static int get_free_alias_slot(void)
236 for (i
=0; i
<MAX_ALIAS_SLOTS
; i
++)
237 if (kvm_aliases
[i
].len
== 0)
/* Record (start, len) in the given slot; len = 0 frees it. */
242 static void register_alias(int slot
, uint64_t start
, uint64_t len
)
244 kvm_aliases
[slot
].start
= start
;
245 kvm_aliases
[slot
].len
= len
;
/*
 * Create (or update) a KVM memory alias mapping guest physical range
 * [phys_start, phys_start+len) onto target_phys.  Reuses the slot for an
 * existing alias at phys_start, otherwise takes a free one.
 */
248 int kvm_create_memory_alias(kvm_context_t kvm
,
251 uint64_t target_phys
)
253 struct kvm_memory_alias alias
= {
255 .guest_phys_addr
= phys_start
,
257 .target_phys_addr
= target_phys
,
262 slot
= get_alias_slot(phys_start
);
264 slot
= get_free_alias_slot();
269 r
= kvm_vm_ioctl(kvm_state
, KVM_SET_MEMORY_ALIAS
, &alias
);
273 register_alias(slot
, phys_start
, len
);
/* Destroying an alias is just creating it with len = 0. */
277 int kvm_destroy_memory_alias(kvm_context_t kvm
, uint64_t phys_start
)
279 return kvm_create_memory_alias(kvm
, phys_start
, 0, 0);
282 #ifdef KVM_CAP_IRQCHIP
/*
 * Read the in-kernel local APIC state into *s.  Only meaningful with an
 * in-kernel irqchip; the early-return value for the !in_kernel case is
 * missing from this extract.
 */
284 int kvm_get_lapic(CPUState
*env
, struct kvm_lapic_state
*s
)
288 if (!kvm_irqchip_in_kernel())
291 r
= kvm_vcpu_ioctl(env
, KVM_GET_LAPIC
, s
);
293 fprintf(stderr
, "KVM_GET_LAPIC failed\n");
/* Mirror of kvm_get_lapic: write *s back into the in-kernel local APIC. */
297 int kvm_set_lapic(CPUState
*env
, struct kvm_lapic_state
*s
)
301 if (!kvm_irqchip_in_kernel())
304 r
= kvm_vcpu_ioctl(env
, KVM_SET_LAPIC
, s
);
307 fprintf(stderr
, "KVM_SET_LAPIC failed\n");
/*
 * Thin accessors for the in-kernel PIT state; each is a no-op (guarded by
 * kvm_pit_in_kernel()) when the PIT is emulated in userspace.  The PIT2
 * variants carry the extended kvm_pit_state2 and are compile-guarded.
 */
315 int kvm_get_pit(kvm_context_t kvm
, struct kvm_pit_state
*s
)
317 if (!kvm_pit_in_kernel())
319 return kvm_vm_ioctl(kvm_state
, KVM_GET_PIT
, s
);
322 int kvm_set_pit(kvm_context_t kvm
, struct kvm_pit_state
*s
)
324 if (!kvm_pit_in_kernel())
326 return kvm_vm_ioctl(kvm_state
, KVM_SET_PIT
, s
);
329 #ifdef KVM_CAP_PIT_STATE2
330 int kvm_get_pit2(kvm_context_t kvm
, struct kvm_pit_state2
*ps2
)
332 if (!kvm_pit_in_kernel())
334 return kvm_vm_ioctl(kvm_state
, KVM_GET_PIT2
, ps2
);
337 int kvm_set_pit2(kvm_context_t kvm
, struct kvm_pit_state2
*ps2
)
339 if (!kvm_pit_in_kernel())
341 return kvm_vm_ioctl(kvm_state
, KVM_SET_PIT2
, ps2
);
/* Capability probe: non-zero when the kernel supports KVM_CAP_PIT_STATE2. */
347 int kvm_has_pit_state2(kvm_context_t kvm
)
351 #ifdef KVM_CAP_PIT_STATE2
352 r
= kvm_check_extension(kvm_state
, KVM_CAP_PIT_STATE2
);
/*
 * Debug helper: dump up to SHOW_CODE_LEN bytes of guest code around the
 * current rip to stderr, marking the faulting instruction with "-->".
 */
357 void kvm_show_code(CPUState
*env
)
359 #define SHOW_CODE_LEN 50
360 struct kvm_regs regs
;
361 struct kvm_sregs sregs
;
/* 3 chars per byte (" %02x") plus NUL terminator. */
365 char code_str
[SHOW_CODE_LEN
* 3 + 1];
368 r
= kvm_vcpu_ioctl(env
, KVM_GET_SREGS
, &sregs
);
370 perror("KVM_GET_SREGS");
373 r
= kvm_vcpu_ioctl(env
, KVM_GET_REGS
/* NOTE(review): "®s" is mojibake for "&regs" (HTML entity &reg; was
 * substituted during extraction) — restore when re-syncing. */
, ®s
);
375 perror("KVM_GET_REGS");
/* Linear fetch address: cs.base + rip (real/vm86-friendly). */
378 rip
= sregs
.cs
.base
+ regs
.rip
;
379 back_offset
= regs
.rip
;
/* Show at most 20 bytes of context before rip. */
380 if (back_offset
> 20)
383 for (n
= -back_offset
; n
< SHOW_CODE_LEN
-back_offset
; ++n
) {
385 strcat(code_str
, " -->");
/* Read one guest byte at rip + n (is_write=1 here looks odd — TODO confirm
 * against the cpu_physical_memory_rw signature of this tree). */
386 cpu_physical_memory_rw(rip
+ n
, &code
, 1, 1);
387 sprintf(code_str
+ strlen(code_str
), " %02x", code
);
389 fprintf(stderr
, "code:%s\n", code_str
);
394 * Returns available msr list. User must free.
396 struct kvm_msr_list
*kvm_get_msr_list(kvm_context_t kvm
)
398 struct kvm_msr_list sizer
, *msrs
;
/* First call with nmsrs = 0 (presumably) just learns the count; the kernel
 * answers -E2BIG, which is therefore not an error here. */
402 r
= kvm_ioctl(kvm_state
, KVM_GET_MSR_INDEX_LIST
, &sizer
);
403 if (r
< 0 && r
!= -E2BIG
)
405 /* Old kernel modules had a bug and could write beyond the provided
406 memory. Allocate at least a safe amount of 1K. */
407 msrs
= qemu_malloc(MAX(1024, sizeof(*msrs
) +
408 sizer
.nmsrs
* sizeof(*msrs
->indices
)));
/* Second call fills in the actual index array. */
410 msrs
->nmsrs
= sizer
.nmsrs
;
411 r
= kvm_ioctl(kvm_state
, KVM_GET_MSR_INDEX_LIST
, msrs
);
/*
 * Read `n` MSRs: copy the caller's entries (with indices filled in) into a
 * temporary kvm_msrs container, issue KVM_GET_MSRS, and copy the returned
 * data back out.  The container is heap-allocated because kvm_msrs has a
 * flexible entries[] tail.
 */
420 int kvm_get_msrs(CPUState
*env
, struct kvm_msr_entry
*msrs
, int n
)
422 struct kvm_msrs
*kmsrs
= qemu_malloc(sizeof *kmsrs
+ n
* sizeof *msrs
);
426 memcpy(kmsrs
->entries
, msrs
, n
* sizeof *msrs
);
427 r
= kvm_vcpu_ioctl(env
, KVM_GET_MSRS
, kmsrs
);
428 memcpy(msrs
, kmsrs
->entries
, n
* sizeof *msrs
);
/* Write `n` MSRs: same container dance, but one-way (no copy-back). */
433 int kvm_set_msrs(CPUState
*env
, struct kvm_msr_entry
*msrs
, int n
)
435 struct kvm_msrs
*kmsrs
= qemu_malloc(sizeof *kmsrs
+ n
* sizeof *msrs
);
439 memcpy(kmsrs
->entries
, msrs
, n
* sizeof *msrs
);
440 r
= kvm_vcpu_ioctl(env
, KVM_SET_MSRS
, kmsrs
);
/*
 * Machine-check (MCE) plumbing: query supported MCG capabilities, arm MCE
 * for a vcpu, and inject a machine-check event.
 */
445 int kvm_get_mce_cap_supported(kvm_context_t kvm
, uint64_t *mce_cap
,
451 r
= kvm_ioctl(kvm_state
, KVM_CHECK_EXTENSION
, KVM_CAP_MCE
);
454 return kvm_ioctl(kvm_state
, KVM_X86_GET_MCE_CAP_SUPPORTED
, mce_cap
);
/* Enable MCE on the vcpu with the given MCG_CAP value. */
460 int kvm_setup_mce(CPUState
*env
, uint64_t *mcg_cap
)
463 return kvm_vcpu_ioctl(env
, KVM_X86_SETUP_MCE
, mcg_cap
);
/* Inject the machine-check event described by *m into the vcpu. */
469 int kvm_set_mce(CPUState
*env
, struct kvm_x86_mce
*m
)
472 return kvm_vcpu_ioctl(env
, KVM_X86_SET_MCE
, m
);
/*
 * Debug formatting helpers for kvm_show_regs: dump one segment register
 * and one descriptor table.  Both take a FILE* but the visible fprintf in
 * print_dt writes to stderr regardless — presumably the same in print_seg;
 * TODO confirm against the missing fprintf line.
 */
478 static void print_seg(FILE *file
, const char *name
, struct kvm_segment
*seg
)
481 "%s %04x (%08llx/%08x p %d dpl %d db %d s %d type %x l %d"
483 name
, seg
->selector
, seg
->base
, seg
->limit
, seg
->present
,
484 seg
->dpl
, seg
->db
, seg
->s
, seg
->type
, seg
->l
, seg
->g
,
/* Print a descriptor-table register as base/limit. */
488 static void print_dt(FILE *file
, const char *name
, struct kvm_dtable
*dt
)
490 fprintf(stderr
, "%s %llx/%x\n", name
, dt
->base
, dt
->limit
);
/*
 * Debug helper: fetch the vcpu's GP registers, segment registers and
 * control registers from KVM and pretty-print them to stderr.
 */
493 void kvm_show_regs(CPUState
*env
)
495 struct kvm_regs regs
;
496 struct kvm_sregs sregs
;
499 r
= kvm_vcpu_ioctl(env
, KVM_GET_REGS
/* NOTE(review): "®s" is mojibake for "&regs" (eaten &reg; entity). */
, ®s
);
501 perror("KVM_GET_REGS");
505 "rax %016llx rbx %016llx rcx %016llx rdx %016llx\n"
506 "rsi %016llx rdi %016llx rsp %016llx rbp %016llx\n"
507 "r8 %016llx r9 %016llx r10 %016llx r11 %016llx\n"
508 "r12 %016llx r13 %016llx r14 %016llx r15 %016llx\n"
509 "rip %016llx rflags %08llx\n",
510 regs
.rax
, regs
.rbx
, regs
.rcx
, regs
.rdx
,
511 regs
.rsi
, regs
.rdi
, regs
.rsp
, regs
.rbp
,
512 regs
.r8
, regs
.r9
, regs
.r10
, regs
.r11
,
513 regs
.r12
, regs
.r13
, regs
.r14
, regs
.r15
,
514 regs
.rip
, regs
.rflags
);
515 r
= kvm_vcpu_ioctl(env
, KVM_GET_SREGS
, &sregs
);
517 perror("KVM_GET_SREGS");
/* Segment registers, task register, LDT, then GDT/IDT bases. */
520 print_seg(stderr
, "cs", &sregs
.cs
);
521 print_seg(stderr
, "ds", &sregs
.ds
);
522 print_seg(stderr
, "es", &sregs
.es
);
523 print_seg(stderr
, "ss", &sregs
.ss
);
524 print_seg(stderr
, "fs", &sregs
.fs
);
525 print_seg(stderr
, "gs", &sregs
.gs
);
526 print_seg(stderr
, "tr", &sregs
.tr
);
527 print_seg(stderr
, "ldt", &sregs
.ldt
);
528 print_dt(stderr
, "gdt", &sregs
.gdt
);
529 print_dt(stderr
, "idt", &sregs
.idt
);
530 fprintf(stderr
, "cr0 %llx cr2 %llx cr3 %llx cr4 %llx cr8 %llx"
532 sregs
.cr0
, sregs
.cr2
, sregs
.cr3
, sregs
.cr4
, sregs
.cr8
,
/* Pass the task-priority register (CR8) to KVM via the shared kvm_run page. */
536 static void kvm_set_cr8(CPUState
*env
, uint64_t cr8
)
538 env
->kvm_run
->cr8
= cr8
;
/*
 * Upload the guest CPUID table (legacy KVM_SET_CPUID variant).  The
 * kvm_cpuid container has a flexible entries[] tail, hence the heap
 * allocation; the free and return lines are missing from this extract.
 */
541 int kvm_setup_cpuid(CPUState
*env
, int nent
,
542 struct kvm_cpuid_entry
*entries
)
544 struct kvm_cpuid
*cpuid
;
547 cpuid
= qemu_malloc(sizeof(*cpuid
) + nent
* sizeof(*entries
));
550 memcpy(cpuid
->entries
, entries
, nent
* sizeof(*entries
));
551 r
= kvm_vcpu_ioctl(env
, KVM_SET_CPUID
, cpuid
);
/* Same as kvm_setup_cpuid but for the extended kvm_cpuid_entry2 format. */
557 int kvm_setup_cpuid2(CPUState
*env
, int nent
,
558 struct kvm_cpuid_entry2
*entries
)
560 struct kvm_cpuid2
*cpuid
;
563 cpuid
= qemu_malloc(sizeof(*cpuid
) + nent
* sizeof(*entries
));
566 memcpy(cpuid
->entries
, entries
, nent
* sizeof(*entries
));
567 r
= kvm_vcpu_ioctl(env
, KVM_SET_CPUID2
, cpuid
);
/*
 * Set / query the kernel's shadow-MMU page budget; both require the
 * KVM_CAP_MMU_SHADOW_CACHE_CONTROL extension.
 */
572 int kvm_set_shadow_pages(kvm_context_t kvm
, unsigned int nrshadow_pages
)
574 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
577 r
= kvm_ioctl(kvm_state
, KVM_CHECK_EXTENSION
,
578 KVM_CAP_MMU_SHADOW_CACHE_CONTROL
);
580 r
= kvm_vm_ioctl(kvm_state
, KVM_SET_NR_MMU_PAGES
, nrshadow_pages
);
582 fprintf(stderr
, "kvm_set_shadow_pages: %m\n");
/* KVM_GET_NR_MMU_PAGES returns the count as the ioctl result. */
591 int kvm_get_shadow_pages(kvm_context_t kvm
, unsigned int *nrshadow_pages
)
593 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
596 r
= kvm_ioctl(kvm_state
, KVM_CHECK_EXTENSION
,
597 KVM_CAP_MMU_SHADOW_CACHE_CONTROL
);
599 *nrshadow_pages
= kvm_vm_ioctl(kvm_state
, KVM_GET_NR_MMU_PAGES
);
/*
 * Ask KVM to report guest TPR accesses (requires KVM_CAP_VAPIC); used by
 * the Windows TPR-patching optimization.
 */
607 static int kvm_enable_tpr_access_reporting(CPUState
*env
)
610 struct kvm_tpr_access_ctl tac
= { .enabled
= 1 };
612 r
= kvm_ioctl(env
->kvm_state
, KVM_CHECK_EXTENSION
, KVM_CAP_VAPIC
);
615 return kvm_vcpu_ioctl(env
, KVM_TPR_ACCESS_REPORTING
, &tac
);
/* Convenience wrappers binding the global kvm_context to the alias API. */
619 int kvm_qemu_create_memory_alias(uint64_t phys_start
,
621 uint64_t target_phys
)
623 return kvm_create_memory_alias(kvm_context
, phys_start
, len
, target_phys
);
626 int kvm_qemu_destroy_memory_alias(uint64_t phys_start
)
628 return kvm_destroy_memory_alias(kvm_context
, phys_start
);
/*
 * kvmclock migration support: snapshot the VM clock with KVM_GET_CLOCK
 * just before saving, restore it with KVM_SET_CLOCK after loading, wired
 * into savevm via a VMStateDescription around the single u64 clock field.
 */
631 #ifdef KVM_CAP_ADJUST_CLOCK
632 static struct kvm_clock_data kvmclock_data
;
634 static void kvmclock_pre_save(void *opaque
)
636 struct kvm_clock_data
*cl
= opaque
;
638 kvm_vm_ioctl(kvm_state
, KVM_GET_CLOCK
, cl
);
641 static int kvmclock_post_load(void *opaque
, int version_id
)
643 struct kvm_clock_data
*cl
= opaque
;
645 return kvm_vm_ioctl(kvm_state
, KVM_SET_CLOCK
, cl
);
648 static const VMStateDescription vmstate_kvmclock
= {
651 .minimum_version_id
= 1,
652 .minimum_version_id_old
= 1,
653 .pre_save
= kvmclock_pre_save
,
654 .post_load
= kvmclock_post_load
,
655 .fields
= (VMStateField
[]) {
656 VMSTATE_U64(clock
, struct kvm_clock_data
),
657 VMSTATE_END_OF_LIST()
/*
 * One-time x86 setup at VM creation: detect a 64-bit host kernel, apply
 * the shadow-page budget, cache the kernel's MSR list (noting MSR_STAR /
 * MSR_VM_HSAVE_PA support), register kvmclock migration state, and pin
 * vcpu 0 as the boot CPU.
 */
662 int kvm_arch_qemu_create_context(void)
665 struct utsname utsname
;
/* The uname() call itself is missing from this extract. */
668 lm_capable_kernel
= strcmp(utsname
.machine
, "x86_64") == 0;
670 if (kvm_shadow_memory
)
671 kvm_set_shadow_pages(kvm_context
, kvm_shadow_memory
);
673 kvm_msr_list
= kvm_get_msr_list(kvm_context
);
676 for (i
= 0; i
< kvm_msr_list
->nmsrs
; ++i
) {
677 if (kvm_msr_list
->indices
[i
] == MSR_STAR
)
678 kvm_has_msr_star
= 1;
679 if (kvm_msr_list
->indices
[i
] == MSR_VM_HSAVE_PA
)
680 kvm_has_vm_hsave_pa
= 1;
683 #ifdef KVM_CAP_ADJUST_CLOCK
684 if (kvm_check_extension(kvm_state
, KVM_CAP_ADJUST_CLOCK
))
685 vmstate_register(0, &vmstate_kvmclock
, &kvmclock_data
);
/* -ENOSYS means an older kernel without KVM_SET_BOOT_CPU_ID: tolerated. */
688 r
= kvm_set_boot_cpu_id(0);
689 if (r
< 0 && r
!= -ENOSYS
) {
/* Fill one kvm_msr_entry (the data-field assignment is missing from this
 * extract but presumably follows). */
696 static void set_msr_entry(struct kvm_msr_entry
*entry
, uint32_t index
,
699 entry
->index
= index
;
/*
 * Scatter one MSR value returned by KVM_GET_MSRS into the matching
 * CPUState field; unknown indices fall through to a warning.
 */
703 /* returns 0 on success, non-0 on failure */
704 static int get_msr_entry(struct kvm_msr_entry
*entry
, CPUState
*env
)
706 switch (entry
->index
) {
707 case MSR_IA32_SYSENTER_CS
:
708 env
->sysenter_cs
= entry
->data
;
710 case MSR_IA32_SYSENTER_ESP
:
711 env
->sysenter_esp
= entry
->data
;
713 case MSR_IA32_SYSENTER_EIP
:
714 env
->sysenter_eip
= entry
->data
;
/* Syscall MSRs (case labels for several of these are missing here). */
717 env
->star
= entry
->data
;
721 env
->cstar
= entry
->data
;
723 case MSR_KERNELGSBASE
:
724 env
->kernelgsbase
= entry
->data
;
727 env
->fmask
= entry
->data
;
730 env
->lstar
= entry
->data
;
734 env
->tsc
= entry
->data
;
736 case MSR_VM_HSAVE_PA
:
737 env
->vm_hsave
= entry
->data
;
/* kvmclock paravirtual MSRs. */
739 case MSR_KVM_SYSTEM_TIME
:
740 env
->system_time_msr
= entry
->data
;
742 case MSR_KVM_WALL_CLOCK
:
743 env
->wall_clock_msr
= entry
->data
;
/* Machine-check global state / control. */
747 env
->mcg_status
= entry
->data
;
750 env
->mcg_ctl
= entry
->data
;
/* MCE bank MSRs: 4 registers per bank, bank count in mcg_cap[7:0]. */
755 if (entry
->index
>= MSR_MC0_CTL
&& \
756 entry
->index
< MSR_MC0_CTL
+ (env
->mcg_cap
& 0xff) * 4) {
757 env
->mce_banks
[entry
->index
- MSR_MC0_CTL
] = entry
->data
;
761 printf("Warning unknown msr index 0x%x\n", entry
->index
);
/*
 * Pull the vcpu's MP state (runnable / halted / uninitialized...) from
 * KVM into env->mp_state; with an in-kernel irqchip, derive env->halted
 * from it, since the HLT state then lives in the kernel.
 */
767 static void kvm_arch_save_mpstate(CPUState
*env
)
769 #ifdef KVM_CAP_MP_STATE
771 struct kvm_mp_state mp_state
;
773 r
= kvm_get_mpstate(env
, &mp_state
);
777 env
->mp_state
= mp_state
.mp_state
;
778 if (kvm_irqchip_in_kernel()) {
779 env
->halted
= (env
->mp_state
== KVM_MP_STATE_HALTED
);
/*
 * Push env->mp_state back into KVM; skipped when it is -1 (host lacked
 * GET_MP_STATE support when the state was saved).
 */
787 static void kvm_arch_load_mpstate(CPUState
*env
)
789 #ifdef KVM_CAP_MP_STATE
790 struct kvm_mp_state mp_state
;
793 * -1 indicates that the host did not support GET_MP_STATE ioctl,
796 if (env
->mp_state
!= -1) {
797 mp_state
.mp_state
= env
->mp_state
;
798 kvm_set_mpstate(env
, &mp_state
);
/*
 * Reset MP state: with an in-kernel irqchip only the bootstrap processor
 * starts RUNNABLE (APs wait UNINITIALIZED for INIT/SIPI); without it,
 * every vcpu is RUNNABLE and userspace sequences the APs.
 */
803 static void kvm_reset_mpstate(CPUState
*env
)
805 #ifdef KVM_CAP_MP_STATE
806 if (kvm_check_extension(kvm_state
, KVM_CAP_MP_STATE
)) {
807 if (kvm_irqchip_in_kernel()) {
808 env
->mp_state
= cpu_is_bsp(env
) ? KVM_MP_STATE_RUNNABLE
:
809 KVM_MP_STATE_UNINITIALIZED
;
811 env
->mp_state
= KVM_MP_STATE_RUNNABLE
;
/*
 * Convert a QEMU SegmentCache to kvm_segment for virtual-8086 mode, where
 * only selector/base/limit matter; the fixed vm86 attribute assignments
 * are missing from this extract.
 */
817 static void set_v8086_seg(struct kvm_segment
*lhs
, const SegmentCache
*rhs
)
819 lhs
->selector
= rhs
->selector
;
820 lhs
->base
= rhs
->base
;
821 lhs
->limit
= rhs
->limit
;
/*
 * Convert a QEMU SegmentCache to kvm_segment, unpacking the descriptor
 * attribute bits from the cached flags word.
 */
833 static void set_seg(struct kvm_segment
*lhs
, const SegmentCache
*rhs
)
835 unsigned flags
= rhs
->flags
;
836 lhs
->selector
= rhs
->selector
;
837 lhs
->base
= rhs
->base
;
838 lhs
->limit
= rhs
->limit
;
/* 4-bit descriptor type field. */
839 lhs
->type
= (flags
>> DESC_TYPE_SHIFT
) & 15;
840 lhs
->present
= (flags
& DESC_P_MASK
) != 0;
/* DPL taken from the selector's RPL bits rather than the flags word. */
841 lhs
->dpl
= rhs
->selector
& 3;
842 lhs
->db
= (flags
>> DESC_B_SHIFT
) & 1;
843 lhs
->s
= (flags
& DESC_S_MASK
) != 0;
844 lhs
->l
= (flags
>> DESC_L_SHIFT
) & 1;
845 lhs
->g
= (flags
& DESC_G_MASK
) != 0;
846 lhs
->avl
= (flags
& DESC_AVL_MASK
) != 0;
/*
 * Inverse of set_seg: rebuild the SegmentCache flags word from the
 * kvm_segment attribute bits (multiplying a 0/1 bit by its mask is
 * equivalent to a conditional OR).
 */
850 static void get_seg(SegmentCache
*lhs
, const struct kvm_segment
*rhs
)
852 lhs
->selector
= rhs
->selector
;
853 lhs
->base
= rhs
->base
;
854 lhs
->limit
= rhs
->limit
;
856 (rhs
->type
<< DESC_TYPE_SHIFT
)
857 | (rhs
->present
* DESC_P_MASK
)
858 | (rhs
->dpl
<< DESC_DPL_SHIFT
)
859 | (rhs
->db
<< DESC_B_SHIFT
)
860 | (rhs
->s
* DESC_S_MASK
)
861 | (rhs
->l
<< DESC_L_SHIFT
)
862 | (rhs
->g
* DESC_G_MASK
)
863 | (rhs
->avl
* DESC_AVL_MASK
);
866 void kvm_arch_load_regs(CPUState
*env
, int level
)
868 struct kvm_regs regs
;
870 struct kvm_sregs sregs
;
871 struct kvm_msr_entry msrs
[100];
874 assert(kvm_cpu_is_stopped(env
) || env
->thread_id
== kvm_get_thread_id());
876 regs
.rax
= env
->regs
[R_EAX
];
877 regs
.rbx
= env
->regs
[R_EBX
];
878 regs
.rcx
= env
->regs
[R_ECX
];
879 regs
.rdx
= env
->regs
[R_EDX
];
880 regs
.rsi
= env
->regs
[R_ESI
];
881 regs
.rdi
= env
->regs
[R_EDI
];
882 regs
.rsp
= env
->regs
[R_ESP
];
883 regs
.rbp
= env
->regs
[R_EBP
];
885 regs
.r8
= env
->regs
[8];
886 regs
.r9
= env
->regs
[9];
887 regs
.r10
= env
->regs
[10];
888 regs
.r11
= env
->regs
[11];
889 regs
.r12
= env
->regs
[12];
890 regs
.r13
= env
->regs
[13];
891 regs
.r14
= env
->regs
[14];
892 regs
.r15
= env
->regs
[15];
895 regs
.rflags
= env
->eflags
;
898 kvm_set_regs(env
, ®s
);
900 memset(&fpu
, 0, sizeof fpu
);
901 fpu
.fsw
= env
->fpus
& ~(7 << 11);
902 fpu
.fsw
|= (env
->fpstt
& 7) << 11;
904 for (i
= 0; i
< 8; ++i
)
905 fpu
.ftwx
|= (!env
->fptags
[i
]) << i
;
906 memcpy(fpu
.fpr
, env
->fpregs
, sizeof env
->fpregs
);
907 memcpy(fpu
.xmm
, env
->xmm_regs
, sizeof env
->xmm_regs
);
908 fpu
.mxcsr
= env
->mxcsr
;
909 kvm_set_fpu(env
, &fpu
);
911 memset(sregs
.interrupt_bitmap
, 0, sizeof(sregs
.interrupt_bitmap
));
912 if (env
->interrupt_injected
>= 0) {
913 sregs
.interrupt_bitmap
[env
->interrupt_injected
/ 64] |=
914 (uint64_t)1 << (env
->interrupt_injected
% 64);
917 if ((env
->eflags
& VM_MASK
)) {
918 set_v8086_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
919 set_v8086_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
920 set_v8086_seg(&sregs
.es
, &env
->segs
[R_ES
]);
921 set_v8086_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
922 set_v8086_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
923 set_v8086_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
925 set_seg(&sregs
.cs
, &env
->segs
[R_CS
]);
926 set_seg(&sregs
.ds
, &env
->segs
[R_DS
]);
927 set_seg(&sregs
.es
, &env
->segs
[R_ES
]);
928 set_seg(&sregs
.fs
, &env
->segs
[R_FS
]);
929 set_seg(&sregs
.gs
, &env
->segs
[R_GS
]);
930 set_seg(&sregs
.ss
, &env
->segs
[R_SS
]);
932 if (env
->cr
[0] & CR0_PE_MASK
) {
933 /* force ss cpl to cs cpl */
934 sregs
.ss
.selector
= (sregs
.ss
.selector
& ~3) |
935 (sregs
.cs
.selector
& 3);
936 sregs
.ss
.dpl
= sregs
.ss
.selector
& 3;
940 set_seg(&sregs
.tr
, &env
->tr
);
941 set_seg(&sregs
.ldt
, &env
->ldt
);
943 sregs
.idt
.limit
= env
->idt
.limit
;
944 sregs
.idt
.base
= env
->idt
.base
;
945 sregs
.gdt
.limit
= env
->gdt
.limit
;
946 sregs
.gdt
.base
= env
->gdt
.base
;
948 sregs
.cr0
= env
->cr
[0];
949 sregs
.cr2
= env
->cr
[2];
950 sregs
.cr3
= env
->cr
[3];
951 sregs
.cr4
= env
->cr
[4];
953 sregs
.cr8
= cpu_get_apic_tpr(env
);
954 sregs
.apic_base
= cpu_get_apic_base(env
);
956 sregs
.efer
= env
->efer
;
958 kvm_set_sregs(env
, &sregs
);
962 /* Remember to increase msrs size if you add new registers below */
963 set_msr_entry(&msrs
[n
++], MSR_IA32_SYSENTER_CS
, env
->sysenter_cs
);
964 set_msr_entry(&msrs
[n
++], MSR_IA32_SYSENTER_ESP
, env
->sysenter_esp
);
965 set_msr_entry(&msrs
[n
++], MSR_IA32_SYSENTER_EIP
, env
->sysenter_eip
);
966 if (kvm_has_msr_star
)
967 set_msr_entry(&msrs
[n
++], MSR_STAR
, env
->star
);
968 if (kvm_has_vm_hsave_pa
)
969 set_msr_entry(&msrs
[n
++], MSR_VM_HSAVE_PA
, env
->vm_hsave
);
971 if (lm_capable_kernel
) {
972 set_msr_entry(&msrs
[n
++], MSR_CSTAR
, env
->cstar
);
973 set_msr_entry(&msrs
[n
++], MSR_KERNELGSBASE
, env
->kernelgsbase
);
974 set_msr_entry(&msrs
[n
++], MSR_FMASK
, env
->fmask
);
975 set_msr_entry(&msrs
[n
++], MSR_LSTAR
, env
->lstar
);
978 if (level
== KVM_PUT_FULL_STATE
) {
980 * KVM is yet unable to synchronize TSC values of multiple VCPUs on
981 * writeback. Until this is fixed, we only write the offset to SMP
982 * guests after migration, desynchronizing the VCPUs, but avoiding
983 * huge jump-backs that would occur without any writeback at all.
985 if (smp_cpus
== 1 || env
->tsc
!= 0) {
986 set_msr_entry(&msrs
[n
++], MSR_IA32_TSC
, env
->tsc
);
988 set_msr_entry(&msrs
[n
++], MSR_KVM_SYSTEM_TIME
, env
->system_time_msr
);
989 set_msr_entry(&msrs
[n
++], MSR_KVM_WALL_CLOCK
, env
->wall_clock_msr
);
993 if (level
== KVM_PUT_RESET_STATE
)
994 set_msr_entry(&msrs
[n
++], MSR_MCG_STATUS
, env
->mcg_status
);
995 else if (level
== KVM_PUT_FULL_STATE
) {
996 set_msr_entry(&msrs
[n
++], MSR_MCG_STATUS
, env
->mcg_status
);
997 set_msr_entry(&msrs
[n
++], MSR_MCG_CTL
, env
->mcg_ctl
);
998 for (i
= 0; i
< (env
->mcg_cap
& 0xff); i
++)
999 set_msr_entry(&msrs
[n
++], MSR_MC0_CTL
+ i
, env
->mce_banks
[i
]);
1004 rc
= kvm_set_msrs(env
, msrs
, n
);
1006 perror("kvm_set_msrs FAILED");
1008 if (level
>= KVM_PUT_RESET_STATE
) {
1009 kvm_arch_load_mpstate(env
);
1010 kvm_load_lapic(env
);
1012 if (level
== KVM_PUT_FULL_STATE
) {
1013 if (env
->kvm_vcpu_update_vapic
)
1014 kvm_tpr_enable_vapic(env
);
1016 if (kvm_irqchip_in_kernel()) {
1017 /* Avoid deadlock: no user space IRQ will ever clear it. */
1021 kvm_put_vcpu_events(env
, level
);
1022 kvm_put_debugregs(env
);
1025 kvm_guest_debug_workarounds(env
);
1028 void kvm_arch_save_regs(CPUState
*env
)
1030 struct kvm_regs regs
;
1032 struct kvm_sregs sregs
;
1033 struct kvm_msr_entry msrs
[100];
1035 uint32_t i
, n
, rc
, bit
;
1037 assert(kvm_cpu_is_stopped(env
) || env
->thread_id
== kvm_get_thread_id());
1039 kvm_get_regs(env
, ®s
);
1041 env
->regs
[R_EAX
] = regs
.rax
;
1042 env
->regs
[R_EBX
] = regs
.rbx
;
1043 env
->regs
[R_ECX
] = regs
.rcx
;
1044 env
->regs
[R_EDX
] = regs
.rdx
;
1045 env
->regs
[R_ESI
] = regs
.rsi
;
1046 env
->regs
[R_EDI
] = regs
.rdi
;
1047 env
->regs
[R_ESP
] = regs
.rsp
;
1048 env
->regs
[R_EBP
] = regs
.rbp
;
1049 #ifdef TARGET_X86_64
1050 env
->regs
[8] = regs
.r8
;
1051 env
->regs
[9] = regs
.r9
;
1052 env
->regs
[10] = regs
.r10
;
1053 env
->regs
[11] = regs
.r11
;
1054 env
->regs
[12] = regs
.r12
;
1055 env
->regs
[13] = regs
.r13
;
1056 env
->regs
[14] = regs
.r14
;
1057 env
->regs
[15] = regs
.r15
;
1060 env
->eflags
= regs
.rflags
;
1061 env
->eip
= regs
.rip
;
1063 kvm_get_fpu(env
, &fpu
);
1064 env
->fpstt
= (fpu
.fsw
>> 11) & 7;
1065 env
->fpus
= fpu
.fsw
;
1066 env
->fpuc
= fpu
.fcw
;
1067 for (i
= 0; i
< 8; ++i
)
1068 env
->fptags
[i
] = !((fpu
.ftwx
>> i
) & 1);
1069 memcpy(env
->fpregs
, fpu
.fpr
, sizeof env
->fpregs
);
1070 memcpy(env
->xmm_regs
, fpu
.xmm
, sizeof env
->xmm_regs
);
1071 env
->mxcsr
= fpu
.mxcsr
;
1073 kvm_get_sregs(env
, &sregs
);
1075 /* There can only be one pending IRQ set in the bitmap at a time, so try
1076 to find it and save its number instead (-1 for none). */
1077 env
->interrupt_injected
= -1;
1078 for (i
= 0; i
< ARRAY_SIZE(sregs
.interrupt_bitmap
); i
++) {
1079 if (sregs
.interrupt_bitmap
[i
]) {
1080 bit
= ctz64(sregs
.interrupt_bitmap
[i
]);
1081 env
->interrupt_injected
= i
* 64 + bit
;
1086 get_seg(&env
->segs
[R_CS
], &sregs
.cs
);
1087 get_seg(&env
->segs
[R_DS
], &sregs
.ds
);
1088 get_seg(&env
->segs
[R_ES
], &sregs
.es
);
1089 get_seg(&env
->segs
[R_FS
], &sregs
.fs
);
1090 get_seg(&env
->segs
[R_GS
], &sregs
.gs
);
1091 get_seg(&env
->segs
[R_SS
], &sregs
.ss
);
1093 get_seg(&env
->tr
, &sregs
.tr
);
1094 get_seg(&env
->ldt
, &sregs
.ldt
);
1096 env
->idt
.limit
= sregs
.idt
.limit
;
1097 env
->idt
.base
= sregs
.idt
.base
;
1098 env
->gdt
.limit
= sregs
.gdt
.limit
;
1099 env
->gdt
.base
= sregs
.gdt
.base
;
1101 env
->cr
[0] = sregs
.cr0
;
1102 env
->cr
[2] = sregs
.cr2
;
1103 env
->cr
[3] = sregs
.cr3
;
1104 env
->cr
[4] = sregs
.cr4
;
1106 cpu_set_apic_base(env
, sregs
.apic_base
);
1108 env
->efer
= sregs
.efer
;
1109 //cpu_set_apic_tpr(env, sregs.cr8);
1111 #define HFLAG_COPY_MASK ~( \
1112 HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
1113 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
1114 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
1115 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
1119 hflags
= (env
->segs
[R_CS
].flags
>> DESC_DPL_SHIFT
) & HF_CPL_MASK
;
1120 hflags
|= (env
->cr
[0] & CR0_PE_MASK
) << (HF_PE_SHIFT
- CR0_PE_SHIFT
);
1121 hflags
|= (env
->cr
[0] << (HF_MP_SHIFT
- CR0_MP_SHIFT
)) &
1122 (HF_MP_MASK
| HF_EM_MASK
| HF_TS_MASK
);
1123 hflags
|= (env
->eflags
& (HF_TF_MASK
| HF_VM_MASK
| HF_IOPL_MASK
));
1124 hflags
|= (env
->cr
[4] & CR4_OSFXSR_MASK
) <<
1125 (HF_OSFXSR_SHIFT
- CR4_OSFXSR_SHIFT
);
1127 if (env
->efer
& MSR_EFER_LMA
) {
1128 hflags
|= HF_LMA_MASK
;
1131 if ((hflags
& HF_LMA_MASK
) && (env
->segs
[R_CS
].flags
& DESC_L_MASK
)) {
1132 hflags
|= HF_CS32_MASK
| HF_SS32_MASK
| HF_CS64_MASK
;
1134 hflags
|= (env
->segs
[R_CS
].flags
& DESC_B_MASK
) >>
1135 (DESC_B_SHIFT
- HF_CS32_SHIFT
);
1136 hflags
|= (env
->segs
[R_SS
].flags
& DESC_B_MASK
) >>
1137 (DESC_B_SHIFT
- HF_SS32_SHIFT
);
1138 if (!(env
->cr
[0] & CR0_PE_MASK
) ||
1139 (env
->eflags
& VM_MASK
) ||
1140 !(hflags
& HF_CS32_MASK
)) {
1141 hflags
|= HF_ADDSEG_MASK
;
1143 hflags
|= ((env
->segs
[R_DS
].base
|
1144 env
->segs
[R_ES
].base
|
1145 env
->segs
[R_SS
].base
) != 0) <<
1149 env
->hflags
= (env
->hflags
& HFLAG_COPY_MASK
) | hflags
;
1153 /* Remember to increase msrs size if you add new registers below */
1154 msrs
[n
++].index
= MSR_IA32_SYSENTER_CS
;
1155 msrs
[n
++].index
= MSR_IA32_SYSENTER_ESP
;
1156 msrs
[n
++].index
= MSR_IA32_SYSENTER_EIP
;
1157 if (kvm_has_msr_star
)
1158 msrs
[n
++].index
= MSR_STAR
;
1159 msrs
[n
++].index
= MSR_IA32_TSC
;
1160 if (kvm_has_vm_hsave_pa
)
1161 msrs
[n
++].index
= MSR_VM_HSAVE_PA
;
1162 #ifdef TARGET_X86_64
1163 if (lm_capable_kernel
) {
1164 msrs
[n
++].index
= MSR_CSTAR
;
1165 msrs
[n
++].index
= MSR_KERNELGSBASE
;
1166 msrs
[n
++].index
= MSR_FMASK
;
1167 msrs
[n
++].index
= MSR_LSTAR
;
1170 msrs
[n
++].index
= MSR_KVM_SYSTEM_TIME
;
1171 msrs
[n
++].index
= MSR_KVM_WALL_CLOCK
;
1175 msrs
[n
++].index
= MSR_MCG_STATUS
;
1176 msrs
[n
++].index
= MSR_MCG_CTL
;
1177 for (i
= 0; i
< (env
->mcg_cap
& 0xff) * 4; i
++)
1178 msrs
[n
++].index
= MSR_MC0_CTL
+ i
;
1182 rc
= kvm_get_msrs(env
, msrs
, n
);
1184 perror("kvm_get_msrs FAILED");
1187 n
= rc
; /* actual number of MSRs */
1188 for (i
=0 ; i
<n
; i
++) {
1189 if (get_msr_entry(&msrs
[i
], env
))
1193 kvm_arch_save_mpstate(env
);
1194 kvm_save_lapic(env
);
1195 kvm_get_vcpu_events(env
);
1196 kvm_get_debugregs(env
);
/*
 * Fill one kvm_cpuid_entry2 by executing QEMU's own CPUID emulation on a
 * scratch CPUState: load leaf/subleaf into EAX/ECX, run
 * qemu_kvm_cpuid_on_env, and capture the four result registers.
 */
1199 static void do_cpuid_ent(struct kvm_cpuid_entry2
*e
, uint32_t function
,
1200 uint32_t count
, CPUState
*env
)
1202 env
->regs
[R_EAX
] = function
;
1203 env
->regs
[R_ECX
] = count
;
1204 qemu_kvm_cpuid_on_env(env
);
1205 e
->function
= function
;
1208 e
->eax
= env
->regs
[R_EAX
];
1209 e
->ebx
= env
->regs
[R_EBX
];
1210 e
->ecx
= env
->regs
[R_ECX
];
1211 e
->edx
= env
->regs
[R_EDX
];
/*
 * Map from KVM capability to the paravirtual feature bit advertised in
 * the KVM_CPUID_FEATURES leaf; each pair is compile-guarded on header
 * support.  Presumably terminated by a sentinel entry (missing here) —
 * see the "-1" bound in get_para_features.
 */
1214 struct kvm_para_features
{
1217 } para_features
[] = {
1218 #ifdef KVM_CAP_CLOCKSOURCE
1219 { KVM_CAP_CLOCKSOURCE
, KVM_FEATURE_CLOCKSOURCE
},
1221 #ifdef KVM_CAP_NOP_IO_DELAY
1222 { KVM_CAP_NOP_IO_DELAY
, KVM_FEATURE_NOP_IO_DELAY
},
1224 #ifdef KVM_CAP_PV_MMU
1225 { KVM_CAP_PV_MMU
, KVM_FEATURE_MMU_OP
},
/*
 * Build the paravirtual feature mask (for CPUID leaf KVM_CPUID_FEATURES)
 * by probing each capability in para_features[].  The "-1" in the loop
 * bound presumably skips a sentinel terminator entry — TODO confirm
 * against the table's missing final entry.
 */
1230 static int get_para_features(kvm_context_t kvm_context
)
1232 int i
, features
= 0;
1234 for (i
= 0; i
< ARRAY_SIZE(para_features
)-1; i
++) {
1235 if (kvm_check_extension(kvm_state
, para_features
[i
].cap
))
1236 features
|= (1 << para_features
[i
].feature
);
/*
 * Clear every bit in *features that the kernel/hardware does not support
 * (bitwise intersection done one bit at a time so individual removals can
 * be reported — the clearing line itself is missing from this extract).
 */
1242 static void kvm_trim_features(uint32_t *features
, uint32_t supported
)
1247 for (i
= 0; i
< 32; ++i
) {
1249 if ((*features
& mask
) && !(supported
& mask
)) {
1255 int kvm_arch_init_vcpu(CPUState
*cenv
)
1257 struct kvm_cpuid_entry2 cpuid_ent
[100];
1258 #ifdef KVM_CPUID_SIGNATURE
1259 struct kvm_cpuid_entry2
*pv_ent
;
1260 uint32_t signature
[3];
1264 uint32_t i
, j
, limit
;
1266 kvm_arch_reset_vcpu(cenv
);
1268 #ifdef KVM_CPUID_SIGNATURE
1269 /* Paravirtualization CPUIDs */
1270 memcpy(signature
, "KVMKVMKVM\0\0\0", 12);
1271 pv_ent
= &cpuid_ent
[cpuid_nent
++];
1272 memset(pv_ent
, 0, sizeof(*pv_ent
));
1273 pv_ent
->function
= KVM_CPUID_SIGNATURE
;
1275 pv_ent
->ebx
= signature
[0];
1276 pv_ent
->ecx
= signature
[1];
1277 pv_ent
->edx
= signature
[2];
1279 pv_ent
= &cpuid_ent
[cpuid_nent
++];
1280 memset(pv_ent
, 0, sizeof(*pv_ent
));
1281 pv_ent
->function
= KVM_CPUID_FEATURES
;
1282 pv_ent
->eax
= get_para_features(kvm_context
);
1285 kvm_trim_features(&cenv
->cpuid_features
,
1286 kvm_arch_get_supported_cpuid(cenv
, 1, R_EDX
));
1288 /* prevent the hypervisor bit from being cleared by the kernel */
1289 i
= cenv
->cpuid_ext_features
& CPUID_EXT_HYPERVISOR
;
1290 kvm_trim_features(&cenv
->cpuid_ext_features
,
1291 kvm_arch_get_supported_cpuid(cenv
, 1, R_ECX
));
1292 cenv
->cpuid_ext_features
|= i
;
1294 kvm_trim_features(&cenv
->cpuid_ext2_features
,
1295 kvm_arch_get_supported_cpuid(cenv
, 0x80000001, R_EDX
));
1296 kvm_trim_features(&cenv
->cpuid_ext3_features
,
1297 kvm_arch_get_supported_cpuid(cenv
, 0x80000001, R_ECX
));
1301 copy
.regs
[R_EAX
] = 0;
1302 qemu_kvm_cpuid_on_env(©
);
1303 limit
= copy
.regs
[R_EAX
];
1305 for (i
= 0; i
<= limit
; ++i
) {
1306 if (i
== 4 || i
== 0xb || i
== 0xd) {
1307 for (j
= 0; ; ++j
) {
1308 do_cpuid_ent(&cpuid_ent
[cpuid_nent
], i
, j
, ©
);
1310 cpuid_ent
[cpuid_nent
].flags
= KVM_CPUID_FLAG_SIGNIFCANT_INDEX
;
1311 cpuid_ent
[cpuid_nent
].index
= j
;
1315 if (i
== 4 && copy
.regs
[R_EAX
] == 0)
1317 if (i
== 0xb && !(copy
.regs
[R_ECX
] & 0xff00))
1319 if (i
== 0xd && copy
.regs
[R_EAX
] == 0)
1323 do_cpuid_ent(&cpuid_ent
[cpuid_nent
++], i
, 0, ©
);
1326 copy
.regs
[R_EAX
] = 0x80000000;
1327 qemu_kvm_cpuid_on_env(©
);
1328 limit
= copy
.regs
[R_EAX
];
1330 for (i
= 0x80000000; i
<= limit
; ++i
)
1331 do_cpuid_ent(&cpuid_ent
[cpuid_nent
++], i
, 0, ©
);
1333 kvm_setup_cpuid2(cenv
, cpuid_nent
, cpuid_ent
);
1336 if (((cenv
->cpuid_version
>> 8)&0xF) >= 6
1337 && (cenv
->cpuid_features
&(CPUID_MCE
|CPUID_MCA
)) == (CPUID_MCE
|CPUID_MCA
)
1338 && kvm_check_extension(kvm_state
, KVM_CAP_MCE
) > 0) {
1342 if (kvm_get_mce_cap_supported(kvm_context
, &mcg_cap
, &banks
))
1343 perror("kvm_get_mce_cap_supported FAILED");
1345 if (banks
> MCE_BANKS_DEF
)
1346 banks
= MCE_BANKS_DEF
;
1347 mcg_cap
&= MCE_CAP_DEF
;
1349 if (kvm_setup_mce(cenv
, &mcg_cap
))
1350 perror("kvm_setup_mce FAILED");
1352 cenv
->mcg_cap
= mcg_cap
;
1357 #ifdef KVM_EXIT_TPR_ACCESS
1358 kvm_enable_tpr_access_reporting(cenv
);
1360 kvm_reset_mpstate(cenv
);
/*
 * Userspace HLT handling: actually halt only if no unmasked maskable
 * interrupt (HARD + IF) and no NMI is pending; the env->halted assignment
 * inside the branch is missing from this extract.
 */
1364 int kvm_arch_halt(CPUState
*env
)
1367 if (!((env
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
1368 (env
->eflags
& IF_MASK
)) &&
1369 !(env
->interrupt_request
& CPU_INTERRUPT_NMI
)) {
/*
 * Hook before each KVM_RUN: with a userspace irqchip, propagate the
 * current APIC TPR to the kernel via the kvm_run cr8 field.
 */
1375 int kvm_arch_pre_run(CPUState
*env
, struct kvm_run
*run
)
1377 if (!kvm_irqchip_in_kernel())
1378 kvm_set_cr8(env
, cpu_get_apic_tpr(env
));
/*
 * True when the vcpu should leave halt: a deliverable maskable interrupt
 * (HARD pending and IF set) or a pending NMI.
 */
1382 int kvm_arch_has_work(CPUState
*env
)
1384 if (((env
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
1385 (env
->eflags
& IF_MASK
)) ||
1386 (env
->interrupt_request
& CPU_INTERRUPT_NMI
))
/*
 * Userspace-irqchip interrupt injection: when KVM signals readiness and a
 * deliverable interrupt is pending, pop the vector from the emulated PIC
 * and inject it.  Returns non-zero if more interrupts remain pending
 * (tells the loop to request another interrupt window).
 */
1391 int kvm_arch_try_push_interrupts(void *opaque
)
1393 CPUState
*env
= cpu_single_env
;
1396 if (kvm_is_ready_for_interrupt_injection(env
) &&
1397 (env
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
1398 (env
->eflags
& IF_MASK
)) {
1399 env
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
1400 irq
= cpu_get_pic_interrupt(env
);
1402 r
= kvm_inject_irq(env
, irq
);
1404 printf("cpu %d fail inject %x\n", env
->cpu_index
, irq
);
1408 return (env
->interrupt_request
& CPU_INTERRUPT_HARD
) != 0;
/*
 * Inject a pending NMI into the current vcpu (userspace-driven NMI
 * delivery, guarded by KVM_CAP_USER_NMI).  likely() marks the common
 * no-NMI-pending fast path.
 */
1411 #ifdef KVM_CAP_USER_NMI
1412 void kvm_arch_push_nmi(void *opaque
)
1414 CPUState
*env
= cpu_single_env
;
1417 if (likely(!(env
->interrupt_request
& CPU_INTERRUPT_NMI
)))
1420 env
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
1421 r
= kvm_inject_nmi(env
);
1423 printf("cpu %d fail inject NMI\n", env
->cpu_index
);
1425 #endif /* KVM_CAP_USER_NMI */
/* Per-vcpu reset: reset architectural state, then re-derive the MP state. */
1427 void kvm_arch_cpu_reset(CPUState
*env
)
1429 kvm_arch_reset_vcpu(env
);
1430 kvm_reset_mpstate(env
);
/*
 * Device-assignment helper, run on a vcpu thread: apply an ioperm()
 * request (port range + on/off) so the thread may access host I/O ports.
 */
1433 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
1434 void kvm_arch_do_ioperm(void *_data
)
1436 struct ioperm_data
*data
= _data
;
1437 ioperm(data
->start_port
, data
->num
, data
->turn_on
);
/*
 * Install the default x86 GSI routing table when the kernel supports GSI
 * routing: IRQs 0-7 to the master PIC, 8-15 to the slave PIC (pin = irq-8),
 * and GSIs 0-23 to the IOAPIC — with GSI 0 remapped to IOAPIC pin 2 and
 * pin 2 itself skipped (classic ISA timer override).
 */
1442 * Setup x86 specific IRQ routing
1444 int kvm_arch_init_irq_routing(void)
1448 if (kvm_irqchip
&& kvm_has_gsi_routing(kvm_context
)) {
1449 kvm_clear_gsi_routes(kvm_context
);
1450 for (i
= 0; i
< 8; ++i
) {
1453 r
= kvm_add_irq_route(kvm_context
, i
, KVM_IRQCHIP_PIC_MASTER
, i
);
1457 for (i
= 8; i
< 16; ++i
) {
1458 r
= kvm_add_irq_route(kvm_context
, i
, KVM_IRQCHIP_PIC_SLAVE
, i
- 8);
1462 for (i
= 0; i
< 24; ++i
) {
1464 r
= kvm_add_irq_route(kvm_context
, i
, KVM_IRQCHIP_IOAPIC
, 2);
1465 } else if (i
!= 2) {
1466 r
= kvm_add_irq_route(kvm_context
, i
, KVM_IRQCHIP_IOAPIC
, i
);
1471 kvm_commit_irq_routes(kvm_context
);
1476 void kvm_arch_process_irqchip_events(CPUState
*env
)
1478 if (env
->interrupt_request
& CPU_INTERRUPT_INIT
) {
1479 kvm_cpu_synchronize_state(env
);
1482 if (env
->interrupt_request
& CPU_INTERRUPT_SIPI
) {
1483 kvm_cpu_synchronize_state(env
);