/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "kvm_svm.h"
#include "x86_emulate.h"
#include "irq.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define KVM_EFER_LMA (1 << 10)
#define KVM_EFER_LME (1 << 8)

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
static void kvm_reput_irq(struct vcpu_svm *svm);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

unsigned long iopm_base;
unsigned long msrpm_base;
struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15
static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}
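/*
 * vcpu->irq_pending is a bitmap of pending guest interrupt vectors and
 * vcpu->irq_summary has one bit per irq_pending word; pop_irq() dequeues
 * the lowest pending vector and push_irq() queues one.
 */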
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	return irq;
}
static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}
static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!(efer & KVM_EFER_LMA))
		efer &= ~KVM_EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->shadow_efer = efer;
}
static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj =	SVM_EVTINJ_VALID |
					SVM_EVTINJ_VALID_ERR |
					SVM_EVTINJ_TYPE_EXEPT |
					GP_VECTOR;
	svm->vmcb->control.event_inj_err = error_code;
}

static void inject_ud(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						SVM_EVTINJ_TYPE_EXEPT |
						UD_VECTOR;
}
static int is_page_fault(uint32_t info)
{
	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
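/*
 * Intercept handlers set svm->next_rip to the instruction following the
 * intercepted one; advance the guest rip there and drop the interrupt
 * shadow so a pending interrupt can be delivered on the next entry.
 */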
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
		return;
	}
	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) {
		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
		       __FUNCTION__, svm->vmcb->save.rip, svm->next_rip);
	}

	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->interrupt_window_open = 1;
}
static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}
static void svm_hardware_disable(void *garbage)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (svm_data) {
		uint64_t efer;

		wrmsrl(MSR_VM_HSAVE_PA, 0);
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
		__free_page(svm_data->save_area);
		kfree(svm_data);
	}
}
static void svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *svm_data;
	uint64_t efer;
#ifdef CONFIG_X86_64
	struct desc_ptr gdt_descr;
#else
	struct Xgt_desc_struct gdt_descr;
#endif
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;
	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}
static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	if (!svm_data->save_area) {
		kfree(svm_data);
		return -ENOMEM;
	}

	per_cpu(svm_data, cpu) = svm_data;

	return 0;
}
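/*
 * The MSR permission map holds two bits per MSR (bit 0: intercept reads,
 * bit 1: intercept writes) for each of the ranges in msrpm_ranges[].
 * Passing read=1/write=1 clears both bits, letting the guest access the
 * MSR directly without a #VMEXIT.
 */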
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}
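/*
 * Allocate and populate the I/O permission map and MSR permission map
 * shared by all VMCBs, then set up the per-cpu host save areas.  Both
 * maps default to "intercept everything" (0xff).
 */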
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	struct page *msrpm_pages;
	void *iopm_va, *msrpm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	r = -ENOMEM;
	if (!msrpm_pages)
		goto err_1;

	msrpm_va = page_address(msrpm_pages);
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err_2;
	}
	return 0;

err_2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
	msrpm_base = 0;
err_1:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}
static __exit void svm_hardware_unsetup(void)
{
	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = msrpm_base = 0;
}
static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}
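/*
 * Set up the control-area intercepts and the architectural reset state of
 * the guest (real mode, cs:ip = f000:fff0) in a fresh VMCB.
 */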
static void init_vmcb(struct vmcb *vmcb)
{
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	control->intercept_cr_read =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_cr_write =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_dr_read =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK;

	control->intercept_dr_write =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK |
					INTERCEPT_DR5_MASK |
					INTERCEPT_DR7_MASK;

	control->intercept_exceptions = 1 << PF_VECTOR;

	control->intercept =	(1ULL << INTERCEPT_INTR) |
				(1ULL << INTERCEPT_NMI) |
				(1ULL << INTERCEPT_SMI) |
		/*
		 * selective cr0 intercept bug?
		 *	0:   0f 22 d8                mov    %eax,%cr3
		 *	3:   0f 20 c0                mov    %cr0,%eax
		 *	6:   0d 00 00 00 80          or     $0x80000000,%eax
		 *	b:   0f 22 c0                mov    %eax,%cr0
		 * set cr3 ->interception
		 * get cr0 ->interception
		 * set cr0 -> no interception
		 */
		/*		(1ULL << INTERCEPT_SELECTIVE_CR0) | */
				(1ULL << INTERCEPT_CPUID) |
				(1ULL << INTERCEPT_INVD) |
				(1ULL << INTERCEPT_HLT) |
				(1ULL << INTERCEPT_INVLPGA) |
				(1ULL << INTERCEPT_IOIO_PROT) |
				(1ULL << INTERCEPT_MSR_PROT) |
				(1ULL << INTERCEPT_TASK_SWITCH) |
				(1ULL << INTERCEPT_SHUTDOWN) |
				(1ULL << INTERCEPT_VMRUN) |
				(1ULL << INTERCEPT_VMMCALL) |
				(1ULL << INTERCEPT_VMLOAD) |
				(1ULL << INTERCEPT_VMSAVE) |
				(1ULL << INTERCEPT_STGI) |
				(1ULL << INTERCEPT_CLGI) |
				(1ULL << INTERCEPT_SKINIT) |
				(1ULL << INTERCEPT_WBINVD) |
				(1ULL << INTERCEPT_MONITOR) |
				(1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = msrpm_base;
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;
	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;

	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
}
static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm->vmcb);

	if (vcpu->vcpu_id != 0) {
		svm->vmcb->save.rip = 0;
		svm->vmcb->save.cs.base = svm->vcpu.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.sipi_vector << 8;
	}
}
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	if (irqchip_in_kernel(kvm)) {
		err = kvm_create_lapic(&svm->vcpu);
		if (err < 0)
			goto free_svm;
	}

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto uninit;

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	memset(svm->db_regs, 0, sizeof(svm->db_regs));
	init_vmcb(svm->vmcb);

	svm->vcpu.fpu_active = 1;
	svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_apic_timer(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->host_tsc);
	kvm_put_guest_fpu(vcpu);
}

static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}
static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->rip = svm->vmcb->save.rip;
}

static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->rip;
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}
static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}
#endif
	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

/* FIXME:

	svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/
static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static int svm_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;

	if (is_external_interrupt(exit_int_info))
		return exit_int_info & SVM_EVTINJ_VEC_MASK;
	return -1;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}
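/*
 * Hand out the next ASID on this cpu; when the pool is exhausted, start a
 * new generation and have the next VMRUN flush all ASIDs from the TLB.
 */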
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->vcpu.cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return to_svm(vcpu)->db_regs[dr];
}
static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->cr4 & X86_CR4_DE) {
			*exception = UD_VECTOR;
			return;
		}
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}
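/*
 * #PF intercept: let the shadow MMU handle the fault; if it cannot, fall
 * back to emulating the faulting instruction (which may turn into an MMIO
 * exit to userspace).
 */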
static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	struct kvm *kvm = svm->vcpu.kvm;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;
	int r;

	if (!irqchip_in_kernel(kvm) &&
	    is_external_interrupt(exit_int_info))
		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	mutex_lock(&kvm->lock);

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;
	r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
	if (r < 0) {
		mutex_unlock(&kvm->lock);
		return r;
	}
	if (!r) {
		mutex_unlock(&kvm->lock);
		return 1;
	}
	er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
				 error_code);
	mutex_unlock(&kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++svm->vcpu.stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(&svm->vcpu, "pagetable");
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}
static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm->vmcb);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
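/*
 * IN/OUT intercept: exit_info_1 encodes direction, size, port and rep
 * prefix; string instructions go to the emulator, simple ones to
 * kvm_emulate_pio().
 */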
static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}
static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}
static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 3;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_hypercall(&svm->vcpu, kvm_run);
}
static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	inject_ud(&svm->vcpu);
	return 1;
}

static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}

static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
	return 1;
}
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->vmcb->save.sysenter_esp;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}
static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data))
		svm_inject_gp(&svm->vcpu, 0);
	else {
		svm->vmcb->save.rax = data & 0xffffffff;
		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = svm->vmcb->save.rip + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}
static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
	u64 data = (svm->vmcb->save.rax & -1u)
		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
	svm->next_rip = svm->vmcb->save.rip + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
		svm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
}
static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm, kvm_run);
	else
		return rdmsr_interception(svm, kvm_run);
}
static int interrupt_window_interception(struct vcpu_svm *svm,
					 struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (kvm_run->request_interrupt_window &&
	    !svm->vcpu.irq_summary) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}
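/* Dispatch table indexed by the VMCB exit code. */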
static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_INTR]				= nop_on_interception,
	[SVM_EXIT_NMI]				= nop_on_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= emulate_on_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= invalid_op_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
	[SVM_EXIT_STGI]				= invalid_op_interception,
	[SVM_EXIT_CLGI]				= invalid_op_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
};
static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	kvm_reput_irq(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
		printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
		       "exit_code 0x%x\n",
		       __FUNCTION__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || svm_exit_handlers[exit_code] == 0) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}
static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (svm->vcpu.cpu != cpu ||
	    svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm_inject_irq(svm, irq);
}
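/*
 * Decide how to deliver a pending interrupt: re-inject one that was in
 * flight at #VMEXIT, inject directly when the guest can take it, or
 * request a virtual interrupt window (INTERCEPT_VINTR) otherwise.
 */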
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int intr_vector = -1;

	kvm_inject_pending_timer_irqs(vcpu);
	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
		intr_vector = vmcb->control.exit_int_info &
			      SVM_EVTINJ_VEC_MASK;
		vmcb->control.exit_int_info = 0;
		svm_inject_irq(svm, intr_vector);
		return;
	}

	if (vmcb->control.int_ctl & V_IRQ_MASK)
		return;

	if (!kvm_cpu_has_interrupt(vcpu))
		return;

	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
		/* unable to deliver irq, set pending irq */
		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
		svm_inject_irq(svm, 0x0);
		return;
	}
	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
	intr_vector = kvm_cpu_get_interrupt(vcpu);
	svm_inject_irq(svm, intr_vector);
	kvm_timer_intr_post(vcpu, intr_vector);
}
static void kvm_reput_irq(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;

	if ((control->int_ctl & V_IRQ_MASK)
	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(&svm->vcpu, control->int_vector);
	}

	svm->vcpu.interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static void svm_do_inject_vector(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	svm_inject_irq(svm, irq);
}
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	svm->vcpu.interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		svm_do_inject_vector(svm);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!svm->vcpu.interrupt_window_open &&
	    (svm->vcpu.irq_summary || kvm_run->request_interrupt_window)) {
		control->intercept |= 1ULL << INTERCEPT_VINTR;
	} else
		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}
static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
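/*
 * Heart of the driver: save host state, load the guest's general purpose
 * registers, VMLOAD/VMRUN/VMSAVE, then restore host state.  Only rax, rsp
 * and rip live in the VMCB; the remaining registers are shuffled by hand
 * in the asm block below.
 */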
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

	pre_svm_run(svm);

	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	svm->host_cr2 = kvm_read_cr2();
	svm->host_dr6 = read_dr6();
	svm->host_dr7 = read_dr7();
	svm->vmcb->save.cr2 = vcpu->cr2;

	if (svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(svm->host_db_regs);
		load_db_regs(svm->db_regs);
	}

	clgi();

	local_irq_enable();

	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
		"mov %c[rbx](%[svm]), %%rbx \n\t"
		"mov %c[rcx](%[svm]), %%rcx \n\t"
		"mov %c[rdx](%[svm]), %%rdx \n\t"
		"mov %c[rsi](%[svm]), %%rsi \n\t"
		"mov %c[rdi](%[svm]), %%rdi \n\t"
		"mov %c[rbp](%[svm]), %%rbp \n\t"
		"mov %c[r8](%[svm]),  %%r8  \n\t"
		"mov %c[r9](%[svm]),  %%r9  \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#else
		"mov %c[rbx](%[svm]), %%ebx \n\t"
		"mov %c[rcx](%[svm]), %%ecx \n\t"
		"mov %c[rdx](%[svm]), %%edx \n\t"
		"mov %c[rsi](%[svm]), %%esi \n\t"
		"mov %c[rdi](%[svm]), %%edi \n\t"
		"mov %c[rbp](%[svm]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[vmcb](%[svm]), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[vmcb](%[svm]), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[svm]) \n\t"
		"mov %%rcx, %c[rcx](%[svm]) \n\t"
		"mov %%rdx, %c[rdx](%[svm]) \n\t"
		"mov %%rsi, %c[rsi](%[svm]) \n\t"
		"mov %%rdi, %c[rdi](%[svm]) \n\t"
		"mov %%rbp, %c[rbp](%[svm]) \n\t"
		"mov %%r8,  %c[r8](%[svm]) \n\t"
		"mov %%r9,  %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"

		"pop  %%r15; pop  %%r14; pop  %%r13; pop  %%r12;"
		"pop  %%r11; pop  %%r10; pop  %%r9;  pop  %%r8;"
		"pop  %%rbp; pop  %%rdi; pop  %%rsi;"
		"pop  %%rdx; pop  %%rcx; pop  %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[svm]) \n\t"
		"mov %%ecx, %c[rcx](%[svm]) \n\t"
		"mov %%edx, %c[rdx](%[svm]) \n\t"
		"mov %%esi, %c[rsi](%[svm]) \n\t"
		"mov %%edi, %c[rdi](%[svm]) \n\t"
		"mov %%ebp, %c[rbp](%[svm]) \n\t"

		"pop  %%ebp; pop  %%edi; pop  %%esi;"
		"pop  %%edx; pop  %%ecx; pop  %%ebx; \n\t"
#endif
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory");
	if ((svm->vmcb->save.dr7 & 0xff))
		load_db_regs(svm->host_db_regs);

	vcpu->cr2 = svm->vmcb->save.cr2;

	write_dr6(svm->host_dr6);
	write_dr7(svm->host_dr7);
	kvm_write_cr2(svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	svm->next_rip = 0;
}
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}
static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  uint32_t err_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	uint32_t exit_int_info = svm->vmcb->control.exit_int_info;

	++vcpu->stat.pf_guest;

	if (is_page_fault(exit_int_info)) {
		/* a page fault while delivering a page fault: inject #DF */
		svm->vmcb->control.event_inj_err = 0;
		svm->vmcb->control.event_inj =	SVM_EVTINJ_VALID |
						SVM_EVTINJ_VALID_ERR |
						SVM_EVTINJ_TYPE_EXEPT |
						DF_VECTOR;
		return;
	}
	svm->vmcb->save.cr2 = addr;
	svm->vmcb->control.event_inj =	SVM_EVTINJ_VALID |
					SVM_EVTINJ_VALID_ERR |
					SVM_EVTINJ_TYPE_EXEPT |
					PF_VECTOR;
	svm->vmcb->control.event_inj_err = err_code;
}
static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
	hypercall[3] = 0xc3;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}
static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_decache = svm_vcpu_decache,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = svm_patch_hypercall,
	.get_irq = svm_get_irq,
	.set_irq = svm_set_irq,
	.inject_pending_irq = svm_intr_assist,
	.inject_pending_vectors = do_interrupt_requests,
};
static int __init svm_init(void)
{
	return kvm_init_x86(&svm_x86_ops, sizeof(struct vcpu_svm),
			    THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit_x86();
}

module_init(svm_init)
module_exit(svm_exit)