/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include "kvm_cache_regs.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
static int bypass_guest_pf = 1;
module_param(bypass_guest_pf, bool, 0);

static int enable_vpid = 1;
module_param(enable_vpid, bool, 0);

static int flexpriority_enabled = 1;
module_param(flexpriority_enabled, bool, 0);

static int enable_ept = 1;
module_param(enable_ept, bool, 0);

static int emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, 0);
struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	struct list_head local_vcpus_link;
	unsigned long host_rsp;
	u32 idt_vectoring_info;
	struct kvm_msr_entry *guest_msrs;
	struct kvm_msr_entry *host_msrs;
	int msr_offset_kernel_gs_base;
	struct {
		int loaded;
		u16 fs_sel, gs_sel, ldt_sel;
		int gs_ldt_reload_needed;
		int fs_reload_needed;
		int guest_efer_loaded;
	} host_state;
	bool emulation_required;
};
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static int init_rmode(struct kvm *kvm);
static u64 construct_eptp(unsigned long root_hpa);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);

static struct page *vmx_io_bitmap_a;
static struct page *vmx_io_bitmap_b;
static struct page *vmx_msr_bitmap;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);
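/*
 * Note on the per-cpu data above: vmxarea holds each cpu's VMXON region,
 * current_vmcs tracks which VMCS is currently loaded on that cpu, and
 * vcpus_on_cpu links every vcpu whose VMCS is resident there so that
 * hardware_disable() and __vcpu_clear() can VMCLEAR them when the cpu is
 * torn down.
 */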
static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;
#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};
/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
static void load_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}

static void save_msrs(struct kvm_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}
static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
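/*
 * The predicates above decode the VM-exit interruption-information field:
 * the low bits hold the vector, the type bits distinguish hardware
 * exceptions from external interrupts, and the valid bit says whether the
 * field holds anything at all.  Each helper therefore masks off
 * type/vector/valid and compares against the one combination it cares
 * about.
 */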
static inline int cpu_has_vmx_msr_bitmap(void)
{
	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
}

static inline int cpu_has_vmx_tpr_shadow(void)
{
	return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
}

static inline int vm_need_tpr_shadow(struct kvm *kvm)
{
	return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
}

static inline int cpu_has_secondary_exec_ctrls(void)
{
	return (vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return flexpriority_enabled
		&& (vmcs_config.cpu_based_2nd_exec_ctrl &
		    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
}

static inline int cpu_has_vmx_invept_individual_addr(void)
{
	return (!!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT));
}

static inline int cpu_has_vmx_invept_context(void)
{
	return (!!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT));
}

static inline int cpu_has_vmx_invept_global(void)
{
	return (!!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT));
}

static inline int cpu_has_vmx_ept(void)
{
	return (vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT);
}

static inline int vm_need_ept(void)
{
	return (cpu_has_vmx_ept() && enable_ept);
}

static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return ((cpu_has_vmx_virtualize_apic_accesses()) &&
		(irqchip_in_kernel(kvm)));
}

static inline int cpu_has_vmx_vpid(void)
{
	return (vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID);
}

static inline int cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx->guest_msrs[i].index == msr)
			return i;
	return -1;
}
static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (__ex(ASM_VMX_INVVPID)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}
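/*
 * Both wrappers pass a descriptor in memory ("a") and the invalidation
 * type in ecx ("c") to the INVVPID/INVEPT instruction; the ja/ud2 pair
 * turns a CF=1 or ZF=1 failure indication into an immediate trap rather
 * than silently continuing with stale TLB entries.
 */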
static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}
static void __vcpu_clear(void *arg)
{
	struct vcpu_vmx *vmx = arg;
	int cpu = raw_smp_processor_id();

	if (vmx->vcpu.cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	rdtscll(vmx->vcpu.arch.host_tsc);
	list_del(&vmx->local_vcpus_link);
}

static void vcpu_clear(struct vcpu_vmx *vmx)
{
	if (vmx->vcpu.cpu == -1)
		return;
	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
}
static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
{
	if (cpu_has_vmx_invept_individual_addr())
		__invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
			 eptp, gpa);
	else
		ept_sync_context(eptp);
}
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}
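/*
 * All VMCS field accesses funnel through vmcs_readl()/vmcs_writel(), which
 * wrap VMREAD/VMWRITE on the currently loaded VMCS; the 16/32/64-bit
 * variants only adjust the operand width, and on 32-bit hosts a 64-bit
 * field is written as two adjacent 32-bit halves (field and field+1).
 */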
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
	if (!vcpu->fpu_active)
		eb |= 1u << NM_VECTOR;
	if (vcpu->guest_debug.enabled)
		eb |= 1u << DB_VECTOR;
	if (vcpu->arch.rmode.active)
		eb = ~0;
	if (vm_need_ept())
		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
	vmcs_write32(EXCEPTION_BITMAP, eb);
}
static void reload_tss(void)
{
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct descriptor_table gdt;
	struct desc_struct *descs;

	kvm_get_gdt(&gdt);
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
}
static void load_transition_efer(struct vcpu_vmx *vmx)
{
	int efer_offset = vmx->msr_offset_efer;
	u64 host_efer = vmx->host_msrs[efer_offset].data;
	u64 guest_efer = vmx->guest_msrs[efer_offset].data;
	u64 ignore_bits;

	/*
	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
	 * outside long mode.
	 */
	ignore_bits = EFER_NX | EFER_SCE;
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
		return;

	vmx->host_state.guest_efer_loaded = 1;
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	wrmsrl(MSR_EFER, guest_efer);
	vmx->vcpu.stat.efer_reload++;
}

static void reload_host_efer(struct vcpu_vmx *vmx)
{
	if (vmx->host_state.guest_efer_loaded) {
		vmx->host_state.guest_efer_loaded = 0;
		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
	}
}
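/*
 * The EFER switch above is an optimization: if guest and host EFER agree
 * in every bit we cannot ignore (NX is emulated, LMA/LME are handled by
 * hardware, SCE only matters in long mode), the expensive WRMSR on every
 * world switch is skipped; reload_host_efer() undoes it only when
 * load_transition_efer() actually wrote the guest value.
 */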
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	vmx->host_state.fs_sel = kvm_read_fs();
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	vmx->host_state.gs_sel = kvm_read_gs();
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

	if (is_long_mode(&vmx->vcpu))
		save_msrs(vmx->host_msrs +
			  vmx->msr_offset_kernel_gs_base, 1);

	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_transition_efer(vmx);
}
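/*
 * The "& 7" test above masks the selector's TI and RPL bits: a selector
 * with RPL > 0 or TI = 1 may not be loaded into HOST_FS/GS_SELECTOR
 * (SDM 22.2.3), so such selectors are written as 0 and flagged for an
 * explicit reload when host state is restored.
 */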
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	unsigned long flags;

	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
	if (vmx->host_state.fs_reload_needed)
		kvm_load_fs(vmx->host_state.fs_sel);
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
		/*
		 * If we have to reload gs, we must take care to
		 * preserve our gs base.
		 */
		local_irq_save(flags);
		kvm_load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
		local_irq_restore(flags);
	}
	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
	reload_host_efer(vmx);
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	__vmx_load_host_state(vmx);
}
/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(vmx->vmcs);
	u64 tsc_this, delta, new_offset;

	if (vcpu->cpu != cpu) {
		kvm_migrate_timers(vcpu);
		vpid_sync_vcpu_all(vmx);
		list_add(&vmx->local_vcpus_link,
			 &per_cpu(vcpus_on_cpu, cpu));
	}

	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vmx->vmcs, phys_addr);
	}

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
		kvm_get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		/*
		 * Make sure the time stamp counter is monotonous.
		 */
		rdtscll(tsc_this);
		if (tsc_this < vcpu->arch.host_tsc) {
			delta = vcpu->arch.host_tsc - tsc_this;
			new_offset = vmcs_read64(TSC_OFFSET) + delta;
			vmcs_write64(TSC_OFFSET, new_offset);
		}
	}
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	__vmx_load_host_state(to_vmx(vcpu));
}
static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
	if (vcpu->arch.cr0 & X86_CR0_TS)
		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	return vmcs_readl(GUEST_RFLAGS);
}
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->arch.rmode.active)
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	vmcs_writel(GUEST_RFLAGS, rflags);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;
	u32 interruptibility;

	rip = kvm_rip_read(vcpu);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	kvm_rip_write(vcpu, rip);

	/*
	 * We emulated an instruction, so temporary interrupt blocking
	 * should be removed, if set.
	 */
	interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	if (interruptibility & 3)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     interruptibility & ~3);
	vcpu->arch.interrupt_window_open = 1;
}
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (has_error_code)
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);

	if (vcpu->arch.rmode.active) {
		vmx->rmode.irq.pending = true;
		vmx->rmode.irq.vector = nr;
		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
		if (nr == BP_VECTOR)
			vmx->rmode.irq.rip++;
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			     nr | INTR_TYPE_SOFT_INTR
			     | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
			     | INTR_INFO_VALID_MASK);
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
		kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
		return;
	}

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     nr | INTR_TYPE_EXCEPTION
		     | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
		     | INTR_INFO_VALID_MASK);
}

static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
{
	return false;
}
/*
 * Swap MSR entry in host/guest MSR entry array.
 */
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct kvm_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
	tmp = vmx->host_msrs[to];
	vmx->host_msrs[to] = vmx->host_msrs[from];
	vmx->host_msrs[from] = tmp;
}
/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs;
	int index;

	vmx_load_host_state(vmx);
	save_nmsrs = 0;
	if (is_long_mode(&vmx->vcpu)) {
		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_K6_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_K6_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
	vmx->save_nmsrs = save_nmsrs;

	vmx->msr_offset_kernel_gs_base =
		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
}
/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

/*
 * writes 'guest_tsc' into guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc)
{
	u64 host_tsc;

	rdtscll(host_tsc);
	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct kvm_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	case MSR_IA32_TIME_STAMP_COUNTER:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	default:
		msr = find_msr_entry(to_vmx(vcpu), msr_index);
		if (msr) {
			data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	}

	*pdata = data;
	return 0;
}
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr;
	int ret = 0;

	switch (msr_index) {
	case MSR_EFER:
		vmx_load_host_state(vmx);
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		break;
	case MSR_FS_BASE:
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmcs_writel(GUEST_GS_BASE, data);
		break;
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TIME_STAMP_COUNTER:
		guest_write_tsc(data);
		break;
	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
		/*
		 * Just discard all writes to the performance counters; this
		 * should keep both older linux and windows 64-bit guests
		 * happy.
		 */
		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data);
		break;
	default:
		vmx_load_host_state(vmx);
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			msr->data = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
	}

	return ret;
}
static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	default:
		break;
	}
}
static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	unsigned long dr7 = 0x400;
	int old_singlestep;
	int i;

	old_singlestep = vcpu->guest_debug.singlestep;

	vcpu->guest_debug.enabled = dbg->enabled;
	if (vcpu->guest_debug.enabled) {
		dr7 |= 0x200;  /* exact */
		for (i = 0; i < 4; ++i) {
			if (!dbg->breakpoints[i].enabled)
				continue;
			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
			dr7 |= 2 << (i*2);    /* global enable */
			dr7 |= 0 << (i*4+16); /* execution breakpoint */
		}

		vcpu->guest_debug.singlestep = dbg->singlestep;
	} else
		vcpu->guest_debug.singlestep = 0;

	if (old_singlestep && !vcpu->guest_debug.singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		vmcs_writel(GUEST_RFLAGS, flags);
	}

	update_exception_bitmap(vcpu);
	vmcs_writel(GUEST_DR7, dr7);

	return 0;
}
static int vmx_get_irq(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.interrupt.pending)
		return -1;
	return vcpu->arch.interrupt.nr;
}
static __init int cpu_has_kvm_support(void)
{
	unsigned long ecx = cpuid_ecx(1);
	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	return (msr & (FEATURE_CONTROL_LOCKED |
		       FEATURE_CONTROL_VMXON_ENABLED))
	    == FEATURE_CONTROL_LOCKED;
	/* locked but not enabled */
}
static void hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old;

	INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
	if ((old & (FEATURE_CONTROL_LOCKED |
		    FEATURE_CONTROL_VMXON_ENABLED))
	    != (FEATURE_CONTROL_LOCKED |
		FEATURE_CONTROL_VMXON_ENABLED))
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
		       FEATURE_CONTROL_LOCKED |
		       FEATURE_CONTROL_VMXON_ENABLED);
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
	asm volatile (ASM_VMX_VMXON_RAX
		      : : "a"(&phys_addr), "m"(phys_addr)
		      : "memory", "cc");
}
static void vmclear_local_vcpus(void)
{
	int cpu = raw_smp_processor_id();
	struct vcpu_vmx *vmx, *n;

	list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
				 local_vcpus_link)
		__vcpu_clear(vmx);
}

static void hardware_disable(void *garbage)
{
	vmclear_local_vcpus();
	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
	write_cr4(read_cr4() & ~X86_CR4_VMXE);
}
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}
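/*
 * adjust_vmx_controls() relies on the VMX capability MSR convention: the
 * low 32 bits report controls that must be 1 (allowed-0 settings) and the
 * high 32 bits report controls that may be 1 (allowed-1 settings).  The
 * caller passes the bits it requires (ctl_min) and the bits it would like
 * (ctl_opt); the function fails only if a required bit is unsupported.
 */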
static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt, min2, opt2;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = PIN_BASED_VIRTUAL_NMIS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_CR3_LOAD_EXITING |
	      CPU_BASED_CR3_STORE_EXITING |
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING |
	      CPU_BASED_INVLPG_EXITING;
	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_USE_MSR_BITMAPS |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min2 = 0;
		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_WBINVD_EXITING |
			SECONDARY_EXEC_ENABLE_VPID |
			SECONDARY_EXEC_ENABLE_EPT;
		if (adjust_vmx_controls(min2, opt2,
					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif
	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
		/* CR3 accesses and invlpg don't need to cause VM Exits when
		   EPT is enabled */
		min &= ~(CPU_BASED_CR3_LOAD_EXITING |
			 CPU_BASED_CR3_STORE_EXITING |
			 CPU_BASED_INVLPG_EXITING);
		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
					&_cpu_based_exec_control) < 0)
			return -EIO;
		rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
		      vmx_capability.ept, vmx_capability.vpid);
	}

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = opt = 0;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_config.size);
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl = _vmexit_control;
	vmcs_conf->vmentry_ctrl = _vmentry_control;

	return 0;
}
*alloc_vmcs_cpu(int cpu
)
1220 int node
= cpu_to_node(cpu
);
1224 pages
= alloc_pages_node(node
, GFP_KERNEL
, vmcs_config
.order
);
1227 vmcs
= page_address(pages
);
1228 memset(vmcs
, 0, vmcs_config
.size
);
1229 vmcs
->revision_id
= vmcs_config
.revision_id
; /* vmcs revision id */
1233 static struct vmcs
*alloc_vmcs(void)
1235 return alloc_vmcs_cpu(raw_smp_processor_id());
1238 static void free_vmcs(struct vmcs
*vmcs
)
1240 free_pages((unsigned long)vmcs
, vmcs_config
.order
);
1243 static void free_kvm_area(void)
1247 for_each_online_cpu(cpu
)
1248 free_vmcs(per_cpu(vmxarea
, cpu
));
1251 static __init
int alloc_kvm_area(void)
1255 for_each_online_cpu(cpu
) {
1258 vmcs
= alloc_vmcs_cpu(cpu
);
1264 per_cpu(vmxarea
, cpu
) = vmcs
;
static __init int hardware_setup(void)
{
	if (setup_vmcs_config(&vmcs_config) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}
, struct kvm_save_segment
*save
)
1287 struct kvm_vmx_segment_field
*sf
= &kvm_vmx_segment_fields
[seg
];
1289 if (vmcs_readl(sf
->base
) == save
->base
&& (save
->base
& AR_S_MASK
)) {
1290 vmcs_write16(sf
->selector
, save
->selector
);
1291 vmcs_writel(sf
->base
, save
->base
);
1292 vmcs_write32(sf
->limit
, save
->limit
);
1293 vmcs_write32(sf
->ar_bytes
, save
->ar
);
1295 u32 dpl
= (vmcs_read16(sf
->selector
) & SELECTOR_RPL_MASK
)
1297 vmcs_write32(sf
->ar_bytes
, 0x93 | dpl
);
1301 static void enter_pmode(struct kvm_vcpu
*vcpu
)
1303 unsigned long flags
;
1304 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1306 vmx
->emulation_required
= 1;
1307 vcpu
->arch
.rmode
.active
= 0;
1309 vmcs_writel(GUEST_TR_BASE
, vcpu
->arch
.rmode
.tr
.base
);
1310 vmcs_write32(GUEST_TR_LIMIT
, vcpu
->arch
.rmode
.tr
.limit
);
1311 vmcs_write32(GUEST_TR_AR_BYTES
, vcpu
->arch
.rmode
.tr
.ar
);
1313 flags
= vmcs_readl(GUEST_RFLAGS
);
1314 flags
&= ~(X86_EFLAGS_IOPL
| X86_EFLAGS_VM
);
1315 flags
|= (vcpu
->arch
.rmode
.save_iopl
<< IOPL_SHIFT
);
1316 vmcs_writel(GUEST_RFLAGS
, flags
);
1318 vmcs_writel(GUEST_CR4
, (vmcs_readl(GUEST_CR4
) & ~X86_CR4_VME
) |
1319 (vmcs_readl(CR4_READ_SHADOW
) & X86_CR4_VME
));
1321 update_exception_bitmap(vcpu
);
1323 if (emulate_invalid_guest_state
)
1326 fix_pmode_dataseg(VCPU_SREG_ES
, &vcpu
->arch
.rmode
.es
);
1327 fix_pmode_dataseg(VCPU_SREG_DS
, &vcpu
->arch
.rmode
.ds
);
1328 fix_pmode_dataseg(VCPU_SREG_GS
, &vcpu
->arch
.rmode
.gs
);
1329 fix_pmode_dataseg(VCPU_SREG_FS
, &vcpu
->arch
.rmode
.fs
);
1331 vmcs_write16(GUEST_SS_SELECTOR
, 0);
1332 vmcs_write32(GUEST_SS_AR_BYTES
, 0x93);
1334 vmcs_write16(GUEST_CS_SELECTOR
,
1335 vmcs_read16(GUEST_CS_SELECTOR
) & ~SELECTOR_RPL_MASK
);
1336 vmcs_write32(GUEST_CS_AR_BYTES
, 0x9b);
1339 static gva_t
rmode_tss_base(struct kvm
*kvm
)
1341 if (!kvm
->arch
.tss_addr
) {
1342 gfn_t base_gfn
= kvm
->memslots
[0].base_gfn
+
1343 kvm
->memslots
[0].npages
- 3;
1344 return base_gfn
<< PAGE_SHIFT
;
1346 return kvm
->arch
.tss_addr
;
1349 static void fix_rmode_seg(int seg
, struct kvm_save_segment
*save
)
1351 struct kvm_vmx_segment_field
*sf
= &kvm_vmx_segment_fields
[seg
];
1353 save
->selector
= vmcs_read16(sf
->selector
);
1354 save
->base
= vmcs_readl(sf
->base
);
1355 save
->limit
= vmcs_read32(sf
->limit
);
1356 save
->ar
= vmcs_read32(sf
->ar_bytes
);
1357 vmcs_write16(sf
->selector
, save
->base
>> 4);
1358 vmcs_write32(sf
->base
, save
->base
& 0xfffff);
1359 vmcs_write32(sf
->limit
, 0xffff);
1360 vmcs_write32(sf
->ar_bytes
, 0xf3);
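/*
 * fix_rmode_seg() saves the protected-mode view of a segment and then
 * forces the vm86-compatible shape the hardware requires for real-mode
 * guests: base = selector << 4 (and conversely selector = base >> 4),
 * a 64K limit, and access rights 0xf3 (present, DPL 3, writable data).
 */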
static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->emulation_required = 1;
	vcpu->arch.rmode.active = 1;

	vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vcpu->arch.rmode.save_iopl
		= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;

	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	if (emulate_invalid_guest_state)
		goto continue_rmode;

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
		vmcs_writel(GUEST_CS_BASE, 0xf0000);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);

continue_rmode:
	kvm_mmu_reset_context(vcpu);
	init_rmode(vcpu->kvm);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
		       __func__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);
	}

	vcpu->arch.shadow_efer |= EFER_LMA;

	find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     | VM_ENTRY_IA32E_MODE);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shadow_efer &= ~EFER_LMA;

	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     & ~VM_ENTRY_IA32E_MODE);
}

#endif
static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
	vpid_sync_vcpu_all(to_vmx(vcpu));
	if (vm_need_ept())
		ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
}

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
}

static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
		if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_ERR "EPT: Fail to load pdptrs!\n");
			return;
		}
		vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
		vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
		vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
		vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]);
	}
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);

static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
					unsigned long cr0,
					struct kvm_vcpu *vcpu)
{
	if (!(cr0 & X86_CR0_PG)) {
		/* From paging/starting to nonpaging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
			     (CPU_BASED_CR3_LOAD_EXITING |
			      CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, vcpu->arch.cr4);
		*hw_cr0 |= X86_CR0_PE | X86_CR0_PG;
		*hw_cr0 &= ~X86_CR0_WP;
	} else if (!is_paging(vcpu)) {
		/* From nonpaging to paging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
			     ~(CPU_BASED_CR3_LOAD_EXITING |
			       CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, vcpu->arch.cr4);
		if (!(vcpu->arch.cr0 & X86_CR0_WP))
			*hw_cr0 &= ~X86_CR0_WP;
	}
}

static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
					struct kvm_vcpu *vcpu)
{
	if (!is_paging(vcpu)) {
		*hw_cr4 &= ~X86_CR4_PAE;
		*hw_cr4 |= X86_CR4_PSE;
	} else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
		*hw_cr4 &= ~X86_CR4_PAE;
}
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) |
				KVM_VM_CR0_ALWAYS_ON;

	vmx_fpu_deactivate(vcpu);

	if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
		enter_pmode(vcpu);

	if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
		enter_rmode(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif

	if (vm_need_ept())
		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0, hw_cr0);
	vcpu->arch.cr0 = cr0;

	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
		vmx_fpu_activate(vcpu);
}

static u64 construct_eptp(unsigned long root_hpa)
{
	u64 eptp;

	/* TODO write the value reading from MSR */
	eptp = VMX_EPT_DEFAULT_MT |
		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
	eptp |= (root_hpa & PAGE_MASK);

	return eptp;
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	unsigned long guest_cr3;
	u64 eptp;

	guest_cr3 = cr3;
	if (vm_need_ept()) {
		eptp = construct_eptp(cr3);
		vmcs_write64(EPT_POINTER, eptp);
		ept_sync_context(eptp);
		ept_load_pdptrs(vcpu);
		guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
			VMX_EPT_IDENTITY_PAGETABLE_ADDR;
	}

	vmx_flush_tlb(vcpu);
	vmcs_writel(GUEST_CR3, guest_cr3);
	if (vcpu->arch.cr0 & X86_CR0_PE)
		vmx_fpu_deactivate(vcpu);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.active ?
		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);

	vcpu->arch.cr4 = cr4;
	if (vm_need_ept())
		ept_update_paging_mode_cr4(&hw_cr4, vcpu);

	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, hw_cr4);
}

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

	vcpu->arch.shadow_efer = efer;
	if (efer & EFER_LMA) {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) |
			     VM_ENTRY_IA32E_MODE);
	} else {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) &
			     ~VM_ENTRY_IA32E_MODE);

		msr->data = efer & ~EFER_LME;
	}
}
static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	return vmcs_readl(sf->base);
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	var->base = vmcs_readl(sf->base);
	var->limit = vmcs_read32(sf->limit);
	var->selector = vmcs_read16(sf->selector);
	ar = vmcs_read32(sf->ar_bytes);
	if (ar & AR_UNUSABLE_MASK)
		ar = 0;
	var->type = ar & 15;
	var->s = (ar >> 4) & 1;
	var->dpl = (ar >> 5) & 3;
	var->present = (ar >> 7) & 1;
	var->avl = (ar >> 12) & 1;
	var->l = (ar >> 13) & 1;
	var->db = (ar >> 14) & 1;
	var->g = (ar >> 15) & 1;
	var->unusable = (ar >> 16) & 1;
}

static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	struct kvm_segment kvm_seg;

	if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
		return 0;

	if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
		return 3;

	vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
	return kvm_seg.selector & 3;
}

static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
	u32 ar;

	ar = var->type & 15;
	ar |= (var->s & 1) << 4;
	ar |= (var->dpl & 3) << 5;
	ar |= (var->present & 1) << 7;
	ar |= (var->avl & 1) << 12;
	ar |= (var->l & 1) << 13;
	ar |= (var->db & 1) << 14;
	ar |= (var->g & 1) << 15;

	if (ar == 0) /* a 0 value means unusable */
		ar = AR_UNUSABLE_MASK;

	return ar;
}
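/*
 * The access-rights word packed/unpacked above mirrors the VMCS AR byte
 * layout: bits 0-3 type, bit 4 S (descriptor type), bits 5-6 DPL, bit 7
 * present, bit 12 AVL, bit 13 L (64-bit code), bit 14 D/B, bit 15
 * granularity and bit 16 "segment unusable".
 */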
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
		vcpu->arch.rmode.tr.selector = var->selector;
		vcpu->arch.rmode.tr.base = var->base;
		vcpu->arch.rmode.tr.limit = var->limit;
		vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
		return;
	}
	vmcs_writel(sf->base, var->base);
	vmcs_write32(sf->limit, var->limit);
	vmcs_write16(sf->selector, var->selector);
	if (vcpu->arch.rmode.active && var->s) {
		/*
		 * Hack real-mode segments into vm86 compatibility.
		 */
		if (var->base == 0xffff0000 && var->selector == 0xf000)
			vmcs_writel(sf->base, 0xf0000);
		ar = 0xf3;
	} else
		ar = vmx_segment_access_rights(var);
	vmcs_write32(sf->ar_bytes, ar);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);

	*db = (ar >> 14) & 1;
	*l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_IDTR_BASE, dt->base);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->base = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
	vmcs_writel(GUEST_GDTR_BASE, dt->base);
}
static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment var;
	u32 ar;

	vmx_get_segment(vcpu, &var, seg);
	ar = vmx_segment_access_rights(&var);

	if (var.base != (var.selector << 4))
		return false;
	if (var.limit != 0xffff)
		return false;
	if (ar != 0xf3)
		return false;

	return true;
}

static bool code_segment_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs;
	unsigned int cs_rpl;

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs_rpl = cs.selector & SELECTOR_RPL_MASK;

	if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
		return false;
	if (!(~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK))) {
		if (cs.dpl > cs_rpl)
			return false;
	} else if (cs.type & AR_TYPE_CODE_MASK) {
		if (cs.dpl != cs_rpl)
			return false;
	}
	/* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
	return true;
}

static bool stack_segment_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment ss;
	unsigned int ss_rpl;

	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
	ss_rpl = ss.selector & SELECTOR_RPL_MASK;

	if ((ss.type != 3) && (ss.type != 7))
		return false;
	if (ss.dpl != ss_rpl) /* DPL != RPL */
		return false;

	return true;
}

static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment var;
	unsigned int rpl;

	vmx_get_segment(vcpu, &var, seg);
	rpl = var.selector & SELECTOR_RPL_MASK;

	if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
		if (var.dpl < rpl) /* DPL < RPL */
			return false;
	}

	/* TODO: Add other members to kvm_segment_field to allow checking for
	 * other access rights flags
	 */
	return true;
}

static bool tr_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment tr;

	vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);

	if (tr.selector & SELECTOR_TI_MASK)	/* TI = 1 */
		return false;
	if ((tr.type != 3) && (tr.type != 11)) /* TODO: Check if guest is in IA32e mode */
		return false;

	return true;
}

static bool ldtr_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment ldtr;

	vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);

	if (ldtr.selector & SELECTOR_TI_MASK)	/* TI = 1 */
		return false;

	return true;
}

static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs, ss;

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);

	return ((cs.selector & SELECTOR_RPL_MASK) ==
		 (ss.selector & SELECTOR_RPL_MASK));
}
/*
 * Check if guest state is valid. Returns true if valid, false if not.
 * We assume that registers are always usable.
 */
static bool guest_state_valid(struct kvm_vcpu *vcpu)
{
	/* real mode guest state checks */
	if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
			return false;
	} else {
		/* protected mode guest state checks */
		if (!cs_ss_rpl_check(vcpu))
			return false;
		if (!code_segment_valid(vcpu))
			return false;
		if (!stack_segment_valid(vcpu))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_DS))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_ES))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_FS))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_GS))
			return false;
		if (!tr_valid(vcpu))
			return false;
		if (!ldtr_valid(vcpu))
			return false;
	}
	/* TODO:
	 * - Add checks on RIP
	 * - Add checks on RFLAGS
	 */

	return true;
}
static int init_rmode_tss(struct kvm *kvm)
{
	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
	u16 data = 0;
	int ret = 0;
	int r;

	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
	r = kvm_write_guest_page(kvm, fn++, &data,
				 TSS_IOPB_BASE_OFFSET, sizeof(u16));
	if (r < 0)
		goto out;
	r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	data = ~0;
	r = kvm_write_guest_page(kvm, fn, &data,
				 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
				 sizeof(u8));
	if (r < 0)
		goto out;

	ret = 1;
out:
	return ret;
}

static int init_rmode_identity_map(struct kvm *kvm)
{
	int i, r, ret;
	pfn_t identity_map_pfn;
	u32 tmp;

	if (!vm_need_ept())
		return 1;
	if (unlikely(!kvm->arch.ept_identity_pagetable)) {
		printk(KERN_ERR "EPT: identity-mapping pagetable "
			"hasn't been allocated!\n");
		return 0;
	}
	if (likely(kvm->arch.ept_identity_pagetable_done))
		return 1;
	ret = 0;
	identity_map_pfn = VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT;
	r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	/* Set up identity-mapping pagetable for EPT in real mode */
	for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
		tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
		r = kvm_write_guest_page(kvm, identity_map_pfn,
				&tmp, i * sizeof(tmp), sizeof(tmp));
		if (r < 0)
			goto out;
	}
	kvm->arch.ept_identity_pagetable_done = true;
	ret = 1;
out:
	return ret;
}
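/*
 * Each entry written above is a 32-bit page-directory entry with the PSE
 * bit set, so entry i maps the 4MB region starting at (i << 22) onto
 * itself; together they give the real-mode guest a 1:1 page table to run
 * under EPT before it enables paging of its own.
 */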
static void seg_setup(int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	vmcs_write16(sf->selector, 0);
	vmcs_writel(sf->base, 0);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0xf3);
}

static int alloc_apic_access_page(struct kvm *kvm)
{
	struct kvm_userspace_memory_region kvm_userspace_mem;
	int r = 0;

	down_write(&kvm->slots_lock);
	if (kvm->arch.apic_access_page)
		goto out;
	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
	kvm_userspace_mem.flags = 0;
	kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
	kvm_userspace_mem.memory_size = PAGE_SIZE;
	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
	if (r)
		goto out;

	kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
out:
	up_write(&kvm->slots_lock);
	return r;
}

static int alloc_identity_pagetable(struct kvm *kvm)
{
	struct kvm_userspace_memory_region kvm_userspace_mem;
	int r = 0;

	down_write(&kvm->slots_lock);
	if (kvm->arch.ept_identity_pagetable)
		goto out;
	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
	kvm_userspace_mem.flags = 0;
	kvm_userspace_mem.guest_phys_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
	kvm_userspace_mem.memory_size = PAGE_SIZE;
	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
	if (r)
		goto out;

	kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
			VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT);
out:
	up_write(&kvm->slots_lock);
	return r;
}

static void allocate_vpid(struct vcpu_vmx *vmx)
{
	int vpid;

	vmx->vpid = 0;
	if (!enable_vpid || !cpu_has_vmx_vpid())
		return;
	spin_lock(&vmx_vpid_lock);
	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
	if (vpid < VMX_NR_VPIDS) {
		vmx->vpid = vpid;
		__set_bit(vpid, vmx_vpid_bitmap);
	}
	spin_unlock(&vmx_vpid_lock);
}

static void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
{
	void *va;

	if (!cpu_has_vmx_msr_bitmap())
		return;

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	va = kmap(msr_bitmap);
	if (msr <= 0x1fff) {
		__clear_bit(msr, va + 0x000); /* read-low */
		__clear_bit(msr, va + 0x800); /* write-low */
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		__clear_bit(msr, va + 0x400); /* read-high */
		__clear_bit(msr, va + 0xc00); /* write-high */
	}
	kunmap(msr_bitmap);
}
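/*
 * The MSR bitmap is a single 4K page split into four 1K regions: read
 * bitmaps for the low (0x0-0x1fff) and high (0xc0000000-0xc0001fff) MSR
 * ranges at offsets 0x000 and 0x400, and the corresponding write bitmaps
 * at 0x800 and 0xc00.  Clearing a bit tells the CPU not to exit on that
 * MSR access.
 */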
/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
	u32 host_sysenter_cs;
	u32 junk;
	unsigned long a;
	struct descriptor_table dt;
	int i;
	unsigned long kvm_vmx_return;
	u32 exec_control;

	/* I/O */
	vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
	vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));

	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap));

	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

	/* Control */
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
		vmcs_config.pin_based_exec_ctrl);

	exec_control = vmcs_config.cpu_based_exec_ctrl;
	if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
		exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
		exec_control |= CPU_BASED_CR8_STORE_EXITING |
				CPU_BASED_CR8_LOAD_EXITING;
#endif
	}
	if (!vm_need_ept())
		exec_control |= CPU_BASED_CR3_STORE_EXITING |
				CPU_BASED_CR3_LOAD_EXITING  |
				CPU_BASED_INVLPG_EXITING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);

	if (cpu_has_secondary_exec_ctrls()) {
		exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
		if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
			exec_control &=
				~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
		if (vmx->vpid == 0)
			exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
		if (!vm_need_ept())
			exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
	}

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

	vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
	rdmsrl(MSR_FS_BASE, a);
	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
	rdmsrl(MSR_GS_BASE, a);
	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

	kvm_get_idt(&dt);
	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */

	asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

	rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
	vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
	rdmsrl(MSR_IA32_SYSENTER_ESP, a);
	vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
	rdmsrl(MSR_IA32_SYSENTER_EIP, a);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */

	for (i = 0; i < NR_VMX_MSR; ++i) {
		u32 index = vmx_msr_index[i];
		u32 data_low, data_high;
		u64 data;
		int j = vmx->nmsrs;

		if (rdmsr_safe(index, &data_low, &data_high) < 0)
			continue;
		if (wrmsr_safe(index, data_low, data_high) < 0)
			continue;
		data = data_low | ((u64)data_high << 32);
		vmx->host_msrs[j].index = index;
		vmx->host_msrs[j].reserved = 0;
		vmx->host_msrs[j].data = data;
		vmx->guest_msrs[j] = vmx->host_msrs[j];
		++vmx->nmsrs;
	}

	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);

	/* 22.2.1, 20.8.1 */
	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);

	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
	vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);

	return 0;
}
static int init_rmode(struct kvm *kvm)
{
	if (!init_rmode_tss(kvm))
		return 0;
	if (!init_rmode_identity_map(kvm))
		return 0;
	return 1;
}

static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 msr;
	int ret;

	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
	down_read(&vcpu->kvm->slots_lock);
	if (!init_rmode(vmx->vcpu.kvm)) {
		ret = -ENOMEM;
		goto out;
	}

	vmx->vcpu.arch.rmode.active = 0;

	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
	kvm_set_cr8(&vmx->vcpu, 0);
	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (vmx->vcpu.vcpu_id == 0)
		msr |= MSR_IA32_APICBASE_BSP;
	kvm_set_apic_base(&vmx->vcpu, msr);

	fx_init(&vmx->vcpu);

	seg_setup(VCPU_SREG_CS);
	/*
	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
	 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
	 */
	if (vmx->vcpu.vcpu_id == 0) {
		vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
		vmcs_writel(GUEST_CS_BASE, 0x000f0000);
	} else {
		vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
		vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
	}

	seg_setup(VCPU_SREG_DS);
	seg_setup(VCPU_SREG_ES);
	seg_setup(VCPU_SREG_FS);
	seg_setup(VCPU_SREG_GS);
	seg_setup(VCPU_SREG_SS);

	vmcs_write16(GUEST_TR_SELECTOR, 0);
	vmcs_writel(GUEST_TR_BASE, 0);
	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
	vmcs_writel(GUEST_LDTR_BASE, 0);
	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

	vmcs_write32(GUEST_SYSENTER_CS, 0);
	vmcs_writel(GUEST_SYSENTER_ESP, 0);
	vmcs_writel(GUEST_SYSENTER_EIP, 0);

	vmcs_writel(GUEST_RFLAGS, 0x02);
	if (vmx->vcpu.vcpu_id == 0)
		kvm_rip_write(vcpu, 0xfff0);
	else
		kvm_rip_write(vcpu, 0);
	kvm_register_write(vcpu, VCPU_REGS_RSP, 0);

	/* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
	vmcs_writel(GUEST_DR7, 0x400);

	vmcs_writel(GUEST_GDTR_BASE, 0);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

	vmcs_writel(GUEST_IDTR_BASE, 0);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

	vmcs_write32(GUEST_ACTIVITY_STATE, 0);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

	/* Special registers */
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

	if (cpu_has_vmx_tpr_shadow()) {
		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
		if (vm_need_tpr_shadow(vmx->vcpu.kvm))
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
				page_to_phys(vmx->vcpu.arch.apic->regs_page));
		vmcs_write32(TPR_THRESHOLD, 0);
	}

	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
		vmcs_write64(APIC_ACCESS_ADDR,
			     page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));

	if (vmx->vpid != 0)
		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);

	vmx->vcpu.arch.cr0 = 0x60000010;
	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
	vmx_set_cr4(&vmx->vcpu, 0);
	vmx_set_efer(&vmx->vcpu, 0);
	vmx_fpu_activate(&vmx->vcpu);
	update_exception_bitmap(&vmx->vcpu);

	vpid_sync_vcpu_all(vmx);

	ret = 0;

	/* HACK: Don't enable emulation on guest boot/reset */
	vmx->emulation_required = 0;

out:
	up_read(&vcpu->kvm->slots_lock);
	return ret;
}
2338 static void vmx_inject_irq(struct kvm_vcpu
*vcpu
, int irq
)
2340 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
2342 KVMTRACE_1D(INJ_VIRQ
, vcpu
, (u32
)irq
, handler
);
2344 ++vcpu
->stat
.irq_injections
;
2345 if (vcpu
->arch
.rmode
.active
) {
2346 vmx
->rmode
.irq
.pending
= true;
2347 vmx
->rmode
.irq
.vector
= irq
;
2348 vmx
->rmode
.irq
.rip
= kvm_rip_read(vcpu
);
2349 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD
,
2350 irq
| INTR_TYPE_SOFT_INTR
| INTR_INFO_VALID_MASK
);
2351 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN
, 1);
2352 kvm_rip_write(vcpu
, vmx
->rmode
.irq
.rip
- 1);
2355 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD
,
2356 irq
| INTR_TYPE_EXT_INTR
| INTR_INFO_VALID_MASK
);
2359 static void vmx_inject_nmi(struct kvm_vcpu
*vcpu
)
2361 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD
,
2362 INTR_TYPE_NMI_INTR
| INTR_INFO_VALID_MASK
| NMI_VECTOR
);
static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	kvm_queue_interrupt(vcpu, irq);
}
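/*
 * Userspace-irqchip path: compute whether the guest can currently accept
 * an interrupt (IF set and no interruptibility blocking), inject the
 * highest pending vector if so, and otherwise request an interrupt-window
 * exit by setting CPU_BASED_VIRTUAL_INTR_PENDING.
 */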
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				       struct kvm_run *kvm_run)
{
	u32 cpu_based_vm_exec_control;

	vcpu->arch.interrupt_window_open =
		((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
		 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);

	if (vcpu->arch.interrupt_window_open &&
	    vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
		kvm_do_inject_irq(vcpu);

	if (vcpu->arch.interrupt_window_open && vcpu->arch.interrupt.pending)
		vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	if (!vcpu->arch.interrupt_window_open &&
	    (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
		/*
		 * Interrupts blocked.  Wait for unblock.
		 */
		cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
	else
		cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	int ret;
	struct kvm_userspace_memory_region tss_mem = {
		.slot = 8,
		.guest_phys_addr = addr,
		.memory_size = PAGE_SIZE * 3,
		.flags = 0,
	};

	ret = kvm_set_memory_region(kvm, &tss_mem, 0);
	if (ret)
		return ret;
	kvm->arch.tss_addr = addr;
	return 0;
}
static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_debug *dbg = &vcpu->guest_debug;

	set_debugreg(dbg->bp[0], 0);
	set_debugreg(dbg->bp[1], 1);
	set_debugreg(dbg->bp[2], 2);
	set_debugreg(dbg->bp[3], 3);

	if (dbg->singlestep) {
		unsigned long flags;

		flags = vmcs_readl(GUEST_RFLAGS);
		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
		vmcs_writel(GUEST_RFLAGS, flags);
	}
}
static int handle_rmode_exception(struct kvm_vcpu *vcpu,
				  int vec, u32 err_code)
{
	/*
	 * Instruction with address size override prefix opcode 0x67
	 * Cause the #SS fault with 0 error code in VM86 mode.
	 */
	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
		if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
			return 1;
	/*
	 * Forward all other exceptions that are valid in real mode.
	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
	 *        the required debugging infrastructure rework.
	 */
	switch (vec) {
	case DE_VECTOR:
	case DB_VECTOR:
	case BP_VECTOR:
	case OF_VECTOR:
	case BR_VECTOR:
	case UD_VECTOR:
	case DF_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
	case MF_VECTOR:
		kvm_queue_exception(vcpu, vec);
		return 1;
	}
	return 0;
}
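/*
 * Exception/NMI exit handler.  NMIs were already handled in vmx_vcpu_run(),
 * #NM re-activates the FPU, #UD is sent to the emulator, #PF is forwarded
 * to the shadow MMU, and exceptions taken in real mode are first offered
 * to handle_rmode_exception().
 */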
static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 intr_info, error_code;
	unsigned long cr2, rip;
	u32 vect_info;
	enum emulation_result er;

	vect_info = vmx->idt_vectoring_info;
	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
						!is_page_fault(intr_info))
		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
		       "intr info 0x%x\n", __func__, vect_info, intr_info);

	if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
		set_bit(irq, vcpu->arch.irq_pending);
		set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
	}

	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
		return 1;  /* already handled by vmx_vcpu_run() */

	if (is_no_device(intr_info)) {
		vmx_fpu_activate(vcpu);
		return 1;
	}

	if (is_invalid_opcode(intr_info)) {
		er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
		if (er != EMULATE_DONE)
			kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	error_code = 0;
	rip = kvm_rip_read(vcpu);
	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	if (is_page_fault(intr_info)) {
		/* EPT won't cause page fault directly */
		if (vm_need_ept())
			BUG();
		cr2 = vmcs_readl(EXIT_QUALIFICATION);
		KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
			    (u32)((u64)cr2 >> 32), handler);
		if (vcpu->arch.interrupt.pending || vcpu->arch.exception.pending)
			kvm_mmu_unprotect_page_virt(vcpu, cr2);
		return kvm_mmu_page_fault(vcpu, cr2, error_code);
	}

	if (vcpu->arch.rmode.active &&
	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
								error_code)) {
		if (vcpu->arch.halt_request) {
			vcpu->arch.halt_request = 0;
			return kvm_emulate_halt(vcpu);
		}
		return 1;
	}

	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
	    (INTR_TYPE_EXCEPTION | 1)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		return 0;
	}
	kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
	kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
	kvm_run->ex.error_code = error_code;
	return 0;
}
static int handle_external_interrupt(struct kvm_vcpu *vcpu,
				     struct kvm_run *kvm_run)
{
	++vcpu->stat.irq_exits;
	KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
	return 1;
}

static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	unsigned long exit_qualification;
	int size, down, in, string, rep;
	unsigned port;

	++vcpu->stat.io_exits;
	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	string = (exit_qualification & 16) != 0;

	if (string) {
		if (emulate_instruction(vcpu,
					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	size = (exit_qualification & 7) + 1;
	in = (exit_qualification & 8) != 0;
	down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
	rep = (exit_qualification & 32) != 0;
	port = exit_qualification >> 16;

	return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
}
static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xc1;
}
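/*
 * Control-register access exits are decoded from the exit qualification:
 * bits 3:0 hold the cr number, bits 5:4 the access type (mov to cr,
 * mov from cr, clts, lmsw) and bits 11:8 the general purpose register.
 */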
static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	unsigned long exit_qualification;
	int cr;
	int reg;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	cr = exit_qualification & 15;
	reg = (exit_qualification >> 8) & 15;
	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
			    (u32)kvm_register_read(vcpu, reg),
			    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
			    handler);
		switch (cr) {
		case 0:
			kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
			skip_emulated_instruction(vcpu);
			return 1;
		case 3:
			kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
			skip_emulated_instruction(vcpu);
			return 1;
		case 4:
			kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
			skip_emulated_instruction(vcpu);
			return 1;
		case 8:
			kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg));
			skip_emulated_instruction(vcpu);
			if (irqchip_in_kernel(vcpu->kvm))
				return 1;
			kvm_run->exit_reason = KVM_EXIT_SET_TPR;
			return 0;
		};
		break;
	case 2: /* clts */
		vmx_fpu_deactivate(vcpu);
		vcpu->arch.cr0 &= ~X86_CR0_TS;
		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
		vmx_fpu_activate(vcpu);
		KVMTRACE_0D(CLTS, vcpu, handler);
		skip_emulated_instruction(vcpu);
		return 1;
	case 1: /*mov from cr*/
		switch (cr) {
		case 3:
			kvm_register_write(vcpu, reg, vcpu->arch.cr3);
			KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
				    (u32)kvm_register_read(vcpu, reg),
				    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
				    handler);
			skip_emulated_instruction(vcpu);
			return 1;
		case 8:
			kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
			KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
				    (u32)kvm_register_read(vcpu, reg), handler);
			skip_emulated_instruction(vcpu);
			return 1;
		}
		break;
	case 3: /* lmsw */
		kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);

		skip_emulated_instruction(vcpu);
		return 1;
	default:
		break;
	}
	kvm_run->exit_reason = 0;
	pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
	       (int)(exit_qualification >> 4) & 3, cr);
	return 0;
}
static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	unsigned long exit_qualification;
	unsigned long val;
	int dr, reg;

	/*
	 * FIXME: this code assumes the host is debugging the guest.
	 *        need to deal with guest debugging itself too.
	 */
	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	dr = exit_qualification & 7;
	reg = (exit_qualification >> 8) & 15;
	if (exit_qualification & 16) {
		/* mov from dr */
		switch (dr) {
		case 6:
			val = 0xffff0ff0;
			break;
		case 7:
			val = 0x400;
			break;
		default:
			val = 0;
		}
		kvm_register_write(vcpu, reg, val);
		KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
	} else {
		/* mov to dr */
	}
	skip_emulated_instruction(vcpu);
	return 1;
}
static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_emulate_cpuid(vcpu);
	return 1;
}
static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (vmx_get_msr(vcpu, ecx, &data)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32),
		    handler);

	/* FIXME: handling of bits 32:63 of rax, rdx */
	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
	vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
	skip_emulated_instruction(vcpu);
	return 1;
}
static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32),
		    handler);

	if (vmx_set_msr(vcpu, ecx, data) != 0) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	skip_emulated_instruction(vcpu);
	return 1;
}
static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
				      struct kvm_run *kvm_run)
{
	return 1;
}
static int handle_interrupt_window(struct kvm_vcpu *vcpu,
				   struct kvm_run *kvm_run)
{
	u32 cpu_based_vm_exec_control;

	/* clear pending irq */
	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);

	KVMTRACE_0D(PEND_INTR, vcpu, handler);
	++vcpu->stat.irq_window_exits;

	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (kvm_run->request_interrupt_window &&
	    !vcpu->arch.irq_summary) {
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}
	return 1;
}
static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	skip_emulated_instruction(vcpu);
	return kvm_emulate_halt(vcpu);
}

static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	skip_emulated_instruction(vcpu);
	kvm_emulate_hypercall(vcpu);
	return 1;
}
static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u64 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);

	kvm_mmu_invlpg(vcpu, exit_qualification);
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	skip_emulated_instruction(vcpu);
	/* TODO: Add support for VT-d/pass-through device */
	return 1;
}
static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u64 exit_qualification;
	enum emulation_result er;
	unsigned long offset;

	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
	offset = exit_qualification & 0xffful;

	er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);

	if (er != EMULATE_DONE) {
		printk(KERN_ERR
		       "Fail to handle apic access vmexit! Offset is 0x%lx\n",
		       offset);
		return -ENOTSUPP;
	}
	return 1;
}
static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	unsigned long exit_qualification;
	u16 tss_selector;
	int reason;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	reason = (u32)exit_qualification >> 30;
	tss_selector = exit_qualification;

	return kvm_task_switch(vcpu, tss_selector, reason);
}
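/*
 * EPT violation handler: violations against addresses with a backing
 * memslot are fed to kvm_mmu_page_fault() to build the EPT mapping,
 * while accesses without a backing memslot are treated as MMIO and
 * handed to the emulator.
 */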
static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u64 exit_qualification;
	enum emulation_result er;
	gpa_t gpa;
	unsigned long hva;
	int gla_validity;
	int r;

	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);

	if (exit_qualification & (1 << 6)) {
		printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
		return -ENOTSUPP;
	}

	gla_validity = (exit_qualification >> 7) & 0x3;
	if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
		printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
		printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
			(long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
			(long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS));
		printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
			(long unsigned int)exit_qualification);
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 0;
		return -ENOTSUPP;
	}

	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
	hva = gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!kvm_is_error_hva(hva)) {
		r = kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
		if (r < 0) {
			printk(KERN_ERR "EPT: Not enough memory!\n");
			return -ENOMEM;
		}
		return 1;
	} else {
		/* must be MMIO */
		er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);

		if (er == EMULATE_FAIL) {
			printk(KERN_ERR
			 "EPT: Fail to handle EPT violation vmexit!er is %d\n",
			 er);
			printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
			 (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
			 (long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS));
			printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
				(long unsigned int)exit_qualification);
			return -ENOTSUPP;
		} else if (er == EMULATE_DO_MMIO)
			return 0;
	}
	return 1;
}
static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	u32 cpu_based_vm_exec_control;

	/* clear pending NMI */
	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
	++vcpu->stat.nmi_window_exits;

	return 1;
}
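/*
 * With emulate_invalid_guest_state, guest code that leaves the segment
 * state unusable for VMX is run through the emulator one instruction at
 * a time until guest_state_valid() is satisfied, instead of entering the
 * guest.
 */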
static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
				struct kvm_run *kvm_run)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int err;

	preempt_enable();
	local_irq_enable();

	while (!guest_state_valid(vcpu)) {
		err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);

		switch (err) {
		case EMULATE_DONE:
			break;
		case EMULATE_DO_MMIO:
			kvm_report_emulation_failure(vcpu, "mmio");
			/* TODO: Handle MMIO */
			return;
		default:
			kvm_report_emulation_failure(vcpu, "emulation failure");
			return;
		}

		if (signal_pending(current))
			break;
		if (need_resched())
			schedule();
	}

	local_irq_disable();
	preempt_disable();

	/* Guest state should be valid now, no more emulation should be needed */
	vmx->emulation_required = 0;
}
/*
 * The exit handlers return 1 if the exit was handled fully and guest execution
 * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
 * to be done to userspace and return 0.
 */
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
				      struct kvm_run *kvm_run) = {
	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
	[EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
	[EXIT_REASON_CR_ACCESS]               = handle_cr,
	[EXIT_REASON_DR_ACCESS]               = handle_dr,
	[EXIT_REASON_CPUID]                   = handle_cpuid,
	[EXIT_REASON_MSR_READ]                = handle_rdmsr,
	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
	[EXIT_REASON_HLT]                     = handle_halt,
	[EXIT_REASON_INVLPG]                  = handle_invlpg,
	[EXIT_REASON_VMCALL]                  = handle_vmcall,
	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
	[EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
};

static const int kvm_vmx_max_exit_handlers =
	ARRAY_SIZE(kvm_vmx_exit_handlers);
/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vectoring_info = vmx->idt_vectoring_info;

	KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
		    (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);

	/* Access CR3 don't cause VMExit in paging mode, so we need
	 * to sync with guest real CR3. */
	if (vm_need_ept() && is_paging(vcpu)) {
		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
		ept_load_pdptrs(vcpu);
	}

	if (unlikely(vmx->fail)) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= vmcs_read32(VM_INSTRUCTION_ERROR);
		return 0;
	}

	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
			(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
			exit_reason != EXIT_REASON_EPT_VIOLATION))
		printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
		       "exit reason is 0x%x\n", __func__, exit_reason);
	if (exit_reason < kvm_vmx_max_exit_handlers
	    && kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}
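/*
 * Program the TPR threshold so that lowering the guest's task priority
 * register below the priority of the highest pending interrupt causes a
 * TPR-below-threshold exit.
 */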
static void update_tpr_threshold(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!vm_need_tpr_shadow(vcpu->kvm))
		return;

	if (!kvm_lapic_enabled(vcpu) ||
	    ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
		vmcs_write32(TPR_THRESHOLD, 0);
		return;
	}

	tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
	vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
}
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	if (!cpu_has_virtual_nmis())
		return;

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}
static int vmx_nmi_enabled(struct kvm_vcpu *vcpu)
{
	u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	return !(guest_intr & (GUEST_INTR_STATE_NMI |
			       GUEST_INTR_STATE_MOV_SS |
			       GUEST_INTR_STATE_STI));
}

static int vmx_irq_enabled(struct kvm_vcpu *vcpu)
{
	u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	return (!(guest_intr & (GUEST_INTR_STATE_MOV_SS |
			       GUEST_INTR_STATE_STI)) &&
		(vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
}
static void enable_intr_window(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.nmi_pending)
		enable_nmi_window(vcpu);
	else if (kvm_cpu_has_interrupt(vcpu))
		enable_irq_window(vcpu);
}
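/*
 * After a vmexit, recover events that were being delivered when the exit
 * happened: restore NMI blocking if a guest IRET faulted, and requeue any
 * exception or external interrupt left in IDT_VECTORING_INFO so it is
 * reinjected on the next entry.
 */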
static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
{
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	bool unblock_nmi;
	u8 vector;
	int type;
	bool idtv_info_valid;
	u32 error;

	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	if (cpu_has_virtual_nmis()) {
		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
		/*
		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
		 * a guest IRET fault.
		 */
		if (unblock_nmi && vector != DF_VECTOR)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
	}

	idt_vectoring_info = vmx->idt_vectoring_info;
	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
	if (vmx->vcpu.arch.nmi_injected) {
		/*
		 * Clear bit "block by NMI" before VM entry if a NMI delivery
		 * faulted.
		 */
		if (idtv_info_valid && type == INTR_TYPE_NMI_INTR)
			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
					GUEST_INTR_STATE_NMI);
		else
			vmx->vcpu.arch.nmi_injected = false;
	}
	kvm_clear_exception_queue(&vmx->vcpu);
	if (idtv_info_valid && type == INTR_TYPE_EXCEPTION) {
		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
			error = vmcs_read32(IDT_VECTORING_ERROR_CODE);
			kvm_queue_exception_e(&vmx->vcpu, vector, error);
		} else
			kvm_queue_exception(&vmx->vcpu, vector);
		vmx->idt_vectoring_info = 0;
	}
	kvm_clear_interrupt_queue(&vmx->vcpu);
	if (idtv_info_valid && type == INTR_TYPE_EXT_INTR) {
		kvm_queue_interrupt(&vmx->vcpu, vector);
		vmx->idt_vectoring_info = 0;
	}
}
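/*
 * Called before every vmentry to decide what, if anything, to inject:
 * pending NMIs take precedence over external interrupts, and when the
 * guest cannot currently accept an event an NMI/interrupt window exit is
 * armed instead.
 */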
static void vmx_intr_assist(struct kvm_vcpu *vcpu)
{
	update_tpr_threshold(vcpu);

	if (cpu_has_virtual_nmis()) {
		if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
			if (vcpu->arch.interrupt.pending) {
				enable_nmi_window(vcpu);
			} else if (vmx_nmi_enabled(vcpu)) {
				vcpu->arch.nmi_pending = false;
				vcpu->arch.nmi_injected = true;
			} else {
				enable_intr_window(vcpu);
				return;
			}
		}
		if (vcpu->arch.nmi_injected) {
			vmx_inject_nmi(vcpu);
			enable_intr_window(vcpu);
			return;
		}
	}
	if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) {
		if (vmx_irq_enabled(vcpu))
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
		else
			enable_irq_window(vcpu);
	}
	if (vcpu->arch.interrupt.pending) {
		vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
		kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr);
	}
}
/*
 * Failure to inject an interrupt should give us the information
 * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
 * when fetching the interrupt redirection bitmap in the real-mode
 * tss, this doesn't happen.  So we do it ourselves.
 */
static void fixup_rmode_irq(struct vcpu_vmx *vmx)
{
	vmx->rmode.irq.pending = 0;
	if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
		return;
	kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
	if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
		vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
		vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
		return;
	}
	vmx->idt_vectoring_info =
		VECTORING_INFO_VALID_MASK
		| INTR_TYPE_EXT_INTR
		| vmx->rmode.irq.vector;
}
#ifdef CONFIG_X86_64
#define R "r"
#define Q "q"
#else
#define R "e"
#define Q "l"
#endif
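/*
 * The world switch itself: guest GPRs are loaded from vcpu->arch.regs,
 * VMLAUNCH/VMRESUME is executed, and the guest registers are saved back
 * on exit.  RSP and RIP live in the VMCS and are only written when marked
 * dirty.
 */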
static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 intr_info;

	/* Handle invalid guest state instead of entering VMX */
	if (vmx->emulation_required && emulate_invalid_guest_state) {
		handle_invalid_guest_state(vcpu, kvm_run);
		return;
	}

	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);

	/*
	 * Loading guest fpu may have cleared host cr0.ts
	 */
	vmcs_writel(HOST_CR0, read_cr0());

	asm(
		/* Store host registers */
		"push %%"R"dx; push %%"R"bp;"
		"push %%"R"cx \n\t"
		"cmp %%"R"sp, %c[host_rsp](%0) \n\t"
		"je 1f \n\t"
		"mov %%"R"sp, %c[host_rsp](%0) \n\t"
		__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
		"1: \n\t"
		/* Check if vmlaunch of vmresume is needed */
		"cmpl $0, %c[launched](%0) \n\t"
		/* Load guest registers.  Don't clobber flags. */
		"mov %c[cr2](%0), %%"R"ax \n\t"
		"mov %%"R"ax, %%cr2 \n\t"
		"mov %c[rax](%0), %%"R"ax \n\t"
		"mov %c[rbx](%0), %%"R"bx \n\t"
		"mov %c[rdx](%0), %%"R"dx \n\t"
		"mov %c[rsi](%0), %%"R"si \n\t"
		"mov %c[rdi](%0), %%"R"di \n\t"
		"mov %c[rbp](%0), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%0),  %%r8  \n\t"
		"mov %c[r9](%0),  %%r9  \n\t"
		"mov %c[r10](%0), %%r10 \n\t"
		"mov %c[r11](%0), %%r11 \n\t"
		"mov %c[r12](%0), %%r12 \n\t"
		"mov %c[r13](%0), %%r13 \n\t"
		"mov %c[r14](%0), %%r14 \n\t"
		"mov %c[r15](%0), %%r15 \n\t"
#endif
		"mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */

		/* Enter guest mode */
		"jne .Llaunched \n\t"
		__ex(ASM_VMX_VMLAUNCH) "\n\t"
		"jmp .Lkvm_vmx_return \n\t"
		".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
		".Lkvm_vmx_return: "
		/* Save guest registers, load host registers, keep flags */
		"xchg %0,     (%%"R"sp) \n\t"
		"mov %%"R"ax, %c[rax](%0) \n\t"
		"mov %%"R"bx, %c[rbx](%0) \n\t"
		"push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t"
		"mov %%"R"dx, %c[rdx](%0) \n\t"
		"mov %%"R"si, %c[rsi](%0) \n\t"
		"mov %%"R"di, %c[rdi](%0) \n\t"
		"mov %%"R"bp, %c[rbp](%0) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8,  %c[r8](%0) \n\t"
		"mov %%r9,  %c[r9](%0) \n\t"
		"mov %%r10, %c[r10](%0) \n\t"
		"mov %%r11, %c[r11](%0) \n\t"
		"mov %%r12, %c[r12](%0) \n\t"
		"mov %%r13, %c[r13](%0) \n\t"
		"mov %%r14, %c[r14](%0) \n\t"
		"mov %%r15, %c[r15](%0) \n\t"
#endif
		"mov %%cr2, %%"R"ax   \n\t"
		"mov %%"R"ax, %c[cr2](%0) \n\t"

		"pop  %%"R"bp; pop  %%"R"bp; pop  %%"R"dx \n\t"
		"setbe %c[fail](%0) \n\t"
	      : : "c"(vmx), "d"((unsigned long)HOST_RSP),
		[launched]"i"(offsetof(struct vcpu_vmx, launched)),
		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
		[host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
		[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
		[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
		[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
		[rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
		[rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
		[rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
		[rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
		[r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
		[r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
		[r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
		[r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
		[r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
		[r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
	      : "cc", "memory"
		, R"bx", R"di", R"si"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif
	      );

	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
	vcpu->arch.regs_dirty = 0;

	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	if (vmx->rmode.irq.pending)
		fixup_rmode_irq(vmx);

	vcpu->arch.interrupt_window_open =
		(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
		 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)) == 0;

	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
	vmx->launched = 1;

	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	/* We need to handle NMIs before interrupts are enabled */
	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
	    (intr_info & INTR_INFO_VALID_MASK)) {
		KVMTRACE_0D(NMI, vcpu, handler);
		asm("int $2");
	}

	vmx_complete_interrupts(vmx);
}
static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->vmcs) {
		vcpu_clear(vmx);
		free_vmcs(vmx->vmcs);
		vmx->vmcs = NULL;
	}
}

static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	spin_lock(&vmx_vpid_lock);
	if (vmx->vpid != 0)
		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
	spin_unlock(&vmx_vpid_lock);
	vmx_free_vmcs(vcpu);
	kfree(vmx->host_msrs);
	kfree(vmx->guest_msrs);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vmx);
}
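/*
 * Allocate and initialize a new VMX vcpu: the vcpu state, the guest/host
 * MSR save areas, the VMCS itself, and (when needed) the APIC-access and
 * EPT identity pages shared by the VM.
 */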
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
	int err;
	struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	int cpu;

	if (!vmx)
		return ERR_PTR(-ENOMEM);

	allocate_vpid(vmx);

	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vmx->guest_msrs) {
		err = -ENOMEM;
		goto uninit_vcpu;
	}

	vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vmx->host_msrs)
		goto free_guest_msrs;

	vmx->vmcs = alloc_vmcs();
	if (!vmx->vmcs)
		goto free_msrs;

	vmcs_clear(vmx->vmcs);

	cpu = get_cpu();
	vmx_vcpu_load(&vmx->vcpu, cpu);
	err = vmx_vcpu_setup(vmx);
	vmx_vcpu_put(&vmx->vcpu);
	put_cpu();
	if (err)
		goto free_vmcs;
	if (vm_need_virtualize_apic_accesses(kvm))
		if (alloc_apic_access_page(kvm) != 0)
			goto free_vmcs;

	if (vm_need_ept())
		if (alloc_identity_pagetable(kvm) != 0)
			goto free_vmcs;

	return &vmx->vcpu;

free_vmcs:
	free_vmcs(vmx->vmcs);
free_msrs:
	kfree(vmx->host_msrs);
free_guest_msrs:
	kfree(vmx->guest_msrs);
uninit_vcpu:
	kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vmx);
	return ERR_PTR(err);
}
static void __init vmx_check_processor_compat(void *rtn)
{
	struct vmcs_config vmcs_conf;

	*(int *)rtn = 0;
	if (setup_vmcs_config(&vmcs_conf) < 0)
		*(int *)rtn = -EIO;
	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
				smp_processor_id());
		*(int *)rtn = -EIO;
	}
}

static int get_ept_level(void)
{
	return VMX_EPT_DEFAULT_GAW + 1;
}
static struct kvm_x86_ops vmx_x86_ops = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.check_processor_compatibility = vmx_check_processor_compat,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,
	.cpu_has_accelerated_tpr = cpu_has_vmx_virtualize_apic_accesses,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,
	.vcpu_reset = vmx_vcpu_reset,

	.prepare_guest_switch = vmx_save_host_state,
	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,

	.set_guest_debug = set_guest_debug,
	.guest_debug_pre = kvm_guest_debug_pre,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cpl = vmx_get_cpl,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
	.set_efer = vmx_set_efer,
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.cache_reg = vmx_cache_reg,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.tlb_flush = vmx_flush_tlb,

	.run = vmx_vcpu_run,
	.handle_exit = kvm_handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = vmx_patch_hypercall,
	.get_irq = vmx_get_irq,
	.set_irq = vmx_inject_irq,
	.queue_exception = vmx_queue_exception,
	.exception_injected = vmx_exception_injected,
	.inject_pending_irq = vmx_intr_assist,
	.inject_pending_vectors = do_interrupt_requests,

	.set_tss_addr = vmx_set_tss_addr,
	.get_tdp_level = get_ept_level,
};
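/*
 * Module init: allocate the I/O and MSR bitmaps, register with the generic
 * kvm layer, open up the MSRs that are switched by the hardware, and
 * configure the MMU for either EPT or shadow paging.
 */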
static int __init vmx_init(void)
{
	void *va;
	int r;

	vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_io_bitmap_a)
		return -ENOMEM;

	vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_io_bitmap_b) {
		r = -ENOMEM;
		goto out;
	}

	vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	if (!vmx_msr_bitmap) {
		r = -ENOMEM;
		goto out1;
	}

	/*
	 * Allow direct access to the PC debug port (it is often used for I/O
	 * delays, but the vmexits simply slow things down).
	 */
	va = kmap(vmx_io_bitmap_a);
	memset(va, 0xff, PAGE_SIZE);
	clear_bit(0x80, va);
	kunmap(vmx_io_bitmap_a);

	va = kmap(vmx_io_bitmap_b);
	memset(va, 0xff, PAGE_SIZE);
	kunmap(vmx_io_bitmap_b);

	va = kmap(vmx_msr_bitmap);
	memset(va, 0xff, PAGE_SIZE);
	kunmap(vmx_msr_bitmap);

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
	if (r)
		goto out2;

	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);

	if (vm_need_ept()) {
		bypass_guest_pf = 0;
		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
			VMX_EPT_WRITABLE_MASK |
			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
			VMX_EPT_IGMT_BIT);
		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
				VMX_EPT_EXECUTABLE_MASK);
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	if (bypass_guest_pf)
		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);

	ept_sync_global();

	return 0;

out2:
	__free_page(vmx_msr_bitmap);
out1:
	__free_page(vmx_io_bitmap_b);
out:
	__free_page(vmx_io_bitmap_a);
	return r;
}
static void __exit vmx_exit(void)
{
	__free_page(vmx_msr_bitmap);
	__free_page(vmx_io_bitmap_b);
	__free_page(vmx_io_bitmap_a);

	kvm_exit();
}

module_init(vmx_init)
module_exit(vmx_exit)