/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>

#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static void kvm_reput_irq(struct vcpu_svm *svm);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

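/*
 * Clarifying note (not in the original source): each of the three MSR
 * ranges above is covered by MSRS_RANGE_SIZE bytes of the permission
 * map at two bits per MSR (one read-intercept bit, one write-intercept
 * bit), so one range spans MSRS_RANGE_SIZE * 8 / 2 = 8192 MSRs.  For
 * example, MSR_LSTAR (0xc0000082) falls in range 1 and lands at bit
 * offset (1 * 8192 + 0x82) * 2 of the map; see set_msr_interception().
 */
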
#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->arch.irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
}

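/*
 * Clarifying note (not in the original source): irq_pending is a
 * two-level bitmap.  Bit 'irq' of irq_pending marks the pending vector
 * and bit 'irq / BITS_PER_LONG' of irq_summary marks the non-empty
 * word, so pop_irq() can locate a pending vector with two __ffs()
 * lookups instead of scanning the whole array.
 */
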
static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->arch.shadow_efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static bool svm_exception_injected(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->arch.interrupt_window_open = 1;
}

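/*
 * Clarifying note (not in the original source): next_rip is computed by
 * the individual exit handlers before they call
 * skip_emulated_instruction(); the sanity check above flags a skip of
 * more than MAX_INST_SIZE (15) bytes, the architectural limit on x86
 * instruction length, as a likely bug.
 */
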
static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	uint64_t efer;

	wrmsrl(MSR_VM_HSAVE_PA, 0);
	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
}

static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (!svm_data)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(svm_data->save_area);
	kfree(svm_data);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
}

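/*
 * Clarifying note (not in the original source): the MSRs whitelisted
 * above are the syscall/sysenter and segment-base MSRs that guests
 * touch on nearly every context switch; letting the guest read and
 * write them directly avoids a #VMEXIT per access, while every other
 * MSR stays intercepted (the map is memset to 0xff first).
 */
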
static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (!svm_has(SVM_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	}

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	control->intercept_cr_read =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_cr_write =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK |
					INTERCEPT_CR8_MASK;

	control->intercept_dr_read =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK;

	control->intercept_dr_write =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK |
					INTERCEPT_DR5_MASK |
					INTERCEPT_DR7_MASK;

	control->intercept_exceptions = (1 << PF_VECTOR) |
					(1 << UD_VECTOR) |
					(1 << MC_VECTOR);

	control->intercept =	(1ULL << INTERCEPT_INTR) |
				(1ULL << INTERCEPT_NMI) |
				(1ULL << INTERCEPT_SMI) |
				(1ULL << INTERCEPT_CPUID) |
				(1ULL << INTERCEPT_INVD) |
				(1ULL << INTERCEPT_HLT) |
				(1ULL << INTERCEPT_INVLPG) |
				(1ULL << INTERCEPT_INVLPGA) |
				(1ULL << INTERCEPT_IOIO_PROT) |
				(1ULL << INTERCEPT_MSR_PROT) |
				(1ULL << INTERCEPT_TASK_SWITCH) |
				(1ULL << INTERCEPT_SHUTDOWN) |
				(1ULL << INTERCEPT_VMRUN) |
				(1ULL << INTERCEPT_VMMCALL) |
				(1ULL << INTERCEPT_VMLOAD) |
				(1ULL << INTERCEPT_VMSAVE) |
				(1ULL << INTERCEPT_STGI) |
				(1ULL << INTERCEPT_CLGI) |
				(1ULL << INTERCEPT_SKINIT) |
				(1ULL << INTERCEPT_WBINVD) |
				(1ULL << INTERCEPT_MONITOR) |
				(1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
					(1ULL << INTERCEPT_INVLPG));
		control->intercept_exceptions &= ~(1 << PF_VECTOR);
		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK |
						INTERCEPT_CR3_MASK);
		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK |
						 INTERCEPT_CR3_MASK);
		save->g_pat = 0x0007040600070406ULL;
		/* enable caching because the QEMU Bios doesn't enable it */
		save->cr0 = X86_CR0_ET;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	force_new_asid(&svm->vcpu);
}

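/*
 * Clarifying note (not in the original source): with nested paging the
 * hardware walks the guest's own page tables, so init_vmcb() drops the
 * CR0/CR3 read and write intercepts, the INVLPG and task-switch
 * intercepts, and the guest #PF intercept above; host-side memory
 * faults are reported through the separate SVM_EXIT_NPF exit instead.
 */
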
static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (vcpu->vcpu_id != 0) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	err = -ENOMEM;
	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto uninit;
	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	memset(svm->db_regs, 0, sizeof(svm->db_regs));
	init_vmcb(svm);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_timers(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

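/*
 * Clarifying note (not in the original source): the guest observes
 * guest_tsc = host_tsc + tsc_offset.  Adding delta = old_host_tsc -
 * new_host_tsc to the offset above keeps that sum unchanged, so the
 * guest-visible TSC stays monotonic when the vcpu migrates to a CPU
 * whose TSC lags the previous one.
 */
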
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->arch.host_tsc);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	if (npt_enabled)
		goto set;

	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->arch.cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	if (!vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		cr0 |= X86_CR0_TS;
	}
set:
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		force_new_asid(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static int svm_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;

	if (is_external_interrupt(exit_int_info))
		return exit_int_info & SVM_EVTINJ_VEC_MASK;
	return -1;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->vcpu.cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

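/*
 * Clarifying note (not in the original source): ASIDs are allocated per
 * physical CPU.  When next_asid overflows max_asid, the generation
 * counter is bumped and a full TLB flush is requested so stale
 * translations cannot survive ASID reuse; pre_svm_run() calls
 * new_asid() again whenever a vcpu's cached generation or CPU no
 * longer matches.
 */
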
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	unsigned long val = to_svm(vcpu)->db_regs[dr];
	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
	return val;
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->arch.cr4 & X86_CR4_DE) {
			*exception = UD_VECTOR;
			return;
		}
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __func__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	struct kvm *kvm = svm->vcpu.kvm;
	u64 fault_address;
	u32 error_code;
	bool event_injection = false;

	if (!irqchip_in_kernel(kvm) &&
	    is_external_interrupt(exit_int_info)) {
		event_injection = true;
		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
	}

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;

	if (!npt_enabled)
		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	else
		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	/*
	 * FIXME: This shouldn't be necessary here, but there is a flush
	 * missing in the MMU code. Until we find this bug, flush the
	 * complete TLB here on an NPF.
	 */
	if (npt_enabled)
		svm_flush_tlb(&svm->vcpu);

	if (!npt_enabled && event_injection)
		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	int er;

	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}

static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu,
					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	KVMTRACE_0D(NMI, &svm->vcpu, handler);
	return 1;
}

static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.irq_exits;
	KVMTRACE_0D(INTR, &svm->vcpu, handler);
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	u16 tss_selector;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_IRET);
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_JMP);
	return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
}

static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
	if (irqchip_in_kernel(svm->vcpu.kvm))
		return 1;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->vmcb->save.sysenter_esp;
		break;
	/* Nobody will change the following 5 values in the VMCB so
	   we can safely return them on rdmsr. They will always be 0
	   until LBRV is implemented. */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data))
		kvm_inject_gp(&svm->vcpu, 0);
	else {
		KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
			    (u32)(data >> 32), handler);

		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!svm_has(SVM_FEATURE_LBRV)) {
			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				  __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		/*
		 * Just discard all writes to the performance counters; this
		 * should keep both older linux and windows 64-bit guests
		 * happy
		 */
		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);

		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
		    handler);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
		kvm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm, kvm_run);
	else
		return rdmsr_interception(svm, kvm_run);
}

static int interrupt_window_interception(struct vcpu_svm *svm,
					 struct kvm_run *kvm_run)
{
	KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);

	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible.
	 */
	if (kvm_run->request_interrupt_window &&
	    !svm->vcpu.arch.irq_summary) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= invalid_op_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
	[SVM_EXIT_STGI]				= invalid_op_interception,
	[SVM_EXIT_CLGI]				= invalid_op_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
	[SVM_EXIT_NPF]				= pf_interception,
};

static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);

	if (npt_enabled) {
		int mmu_reload = 0;
		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
			mmu_reload = 1;
		}
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
		vcpu->arch.cr3 = svm->vmcb->save.cr3;
		if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
			if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
				kvm_inject_gp(vcpu, 0);
				return 1;
			}
		}
		if (mmu_reload) {
			kvm_mmu_reset_context(vcpu);
			kvm_mmu_load(vcpu);
		}
	}

	kvm_reput_irq(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm, kvm_run);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (svm->vcpu.cpu != cpu ||
	    svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);

	++svm->vcpu.stat.irq_injections;
	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

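/*
 * Clarifying note (not in the original source): the injected virtual
 * interrupt carries priority 0xf (the highest) rather than a value
 * derived from the vector, as the commented-out expression above
 * suggests was once intended, so the V_INTR_PRIO field never masks
 * delivery of the virtual interrupt.
 */
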
static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm_inject_irq(svm, irq);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int max_irr, tpr;

	if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
		return;

	vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;

	max_irr = kvm_lapic_find_highest_irr(vcpu);
	if (max_irr == -1)
		return;

	tpr = kvm_lapic_get_cr8(vcpu) << 4;

	if (tpr >= (max_irr & 0xf0))
		vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
}

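/*
 * Clarifying note (not in the original source): CR8 writes only need
 * to be trapped while raising the TPR could mask the highest pending
 * interrupt, i.e. when tpr >= (max_irr & 0xf0); in all other cases the
 * guest may update CR8 without causing an exit.
 */
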
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int intr_vector = -1;

	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
		intr_vector = vmcb->control.exit_int_info &
			      SVM_EVTINJ_VEC_MASK;
		vmcb->control.exit_int_info = 0;
		svm_inject_irq(svm, intr_vector);
		goto out;
	}

	if (vmcb->control.int_ctl & V_IRQ_MASK)
		goto out;

	if (!kvm_cpu_has_interrupt(vcpu))
		goto out;

	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
		/* unable to deliver irq, set pending irq */
		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
		svm_inject_irq(svm, 0x0);
		goto out;
	}
	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
	intr_vector = kvm_cpu_get_interrupt(vcpu);
	svm_inject_irq(svm, intr_vector);
	kvm_timer_intr_post(vcpu, intr_vector);
out:
	update_cr8_intercept(vcpu);
}

static void kvm_reput_irq(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;

	if ((control->int_ctl & V_IRQ_MASK)
	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(&svm->vcpu, control->int_vector);
	}

	svm->vcpu.arch.interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}

static void svm_do_inject_vector(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	svm_inject_irq(svm, irq);
}

static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	svm->vcpu.arch.interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		svm_do_inject_vector(svm);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!svm->vcpu.arch.interrupt_window_open &&
	    (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
		control->intercept |= 1ULL << INTERCEPT_VINTR;
	else
		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_lapic_set_tpr(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

#ifdef CONFIG_X86_64
#define R "r"
#else
#define R "e"
#endif

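/*
 * Clarifying note (not in the original source): R expands to the
 * register-name prefix for the current word size, so "%%"R"ax" becomes
 * %%rax on x86-64 and %%eax on 32-bit, letting one asm body below
 * serve both configurations.
 */
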
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	save_host_msrs(vcpu);
	fs_selector = kvm_read_fs();
	gs_selector = kvm_read_gs();
	ldt_selector = kvm_read_ldt();
	svm->host_cr2 = kvm_read_cr2();
	svm->host_dr6 = read_dr6();
	svm->host_dr7 = read_dr7();
	svm->vmcb->save.cr2 = vcpu->arch.cr2;
	/* required for live migration with NPT */
	if (npt_enabled)
		svm->vmcb->save.cr3 = vcpu->arch.cr3;

	if (svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(svm->host_db_regs);
		load_db_regs(svm->db_regs);
	}

	clgi();

	local_irq_enable();

	asm volatile (
		"push %%"R"bp; \n\t"
		"mov %c[rbx](%[svm]), %%"R"bx \n\t"
		"mov %c[rcx](%[svm]), %%"R"cx \n\t"
		"mov %c[rdx](%[svm]), %%"R"dx \n\t"
		"mov %c[rsi](%[svm]), %%"R"si \n\t"
		"mov %c[rdi](%[svm]), %%"R"di \n\t"
		"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]),  %%r8  \n\t"
		"mov %c[r9](%[svm]),  %%r9  \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%"R"ax \n\t"
		"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%"R"ax \n\t"

		/* Save guest registers, load host registers */
		"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
		"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
		"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
		"mov %%"R"si, %c[rsi](%[svm]) \n\t"
		"mov %%"R"di, %c[rdi](%[svm]) \n\t"
		"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8,  %c[r8](%[svm]) \n\t"
		"mov %%r9,  %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%"R"bp"
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
		, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#endif
		);

	if ((svm->vmcb->save.dr7 & 0xff))
		load_db_regs(svm->host_db_regs);

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	write_dr6(svm->host_dr6);
	write_dr7(svm->host_dr7);
	kvm_write_cr2(svm->host_cr2);

	kvm_load_fs(fs_selector);
	kvm_load_gs(gs_selector);
	kvm_load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;
}

#undef R

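/*
 * Clarifying note (not in the original source): clgi()/stgi() bracket
 * the VMRUN above.  With GIF clear, the physical CPU takes no
 * interrupts even though local_irq_enable() sets EFLAGS.IF, so guest
 * state cannot be clobbered mid-switch; host interrupt delivery
 * resumes only after local_irq_disable() and the closing stgi().
 */
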
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (npt_enabled) {
		svm->vmcb->control.nested_cr3 = root;
		force_new_asid(vcpu);
		return;
	}

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}

static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static int svm_get_mt_mask_shift(void)
{
	return 0;
}

static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = svm_patch_hypercall,
	.get_irq = svm_get_irq,
	.set_irq = svm_set_irq,
	.queue_exception = svm_queue_exception,
	.exception_injected = svm_exception_injected,
	.inject_pending_irq = svm_intr_assist,
	.inject_pending_vectors = do_interrupt_requests,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask_shift = svm_get_mt_mask_shift,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)