7 static void setup_svm(void)
9 void *hsave
= alloc_page();
11 wrmsr(MSR_VM_HSAVE_PA
, virt_to_phys(hsave
));
12 wrmsr(MSR_EFER
, rdmsr(MSR_EFER
) | EFER_SVME
);
15 static void vmcb_set_seg(struct vmcb_seg
*seg
, u16 selector
,
16 u64 base
, u32 limit
, u32 attr
)
18 seg
->selector
= selector
;
/*
 * Initialize @vmcb so a guest launched from it starts as an identity
 * copy of the current host context: same segments, descriptor tables,
 * control/debug registers, and PAT/DEBUGCTL MSR values.  Only VMRUN
 * and VMMCALL are intercepted.
 */
static void vmcb_ident(struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    struct vmcb_save_area *save = &vmcb->save;
    struct vmcb_control_area *ctrl = &vmcb->control;
    /* type 3 (read/write data), code/data (S), present, 32-bit default, 4K granularity */
    u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
    /* type 9 (code), code/data (S), present, long mode, 4K granularity */
    u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
        | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
    struct descriptor_table_ptr desc_table_ptr;

    /* Start from a clean slate, then let VMSAVE fill in the subset of
     * state it covers (FS/GS/TR/LDTR, syscall MSRs — per the AMD APM);
     * the rest is copied from the host by hand below. */
    memset(vmcb, 0, sizeof(*vmcb));
    asm volatile ("vmsave" : : "a"(vmcb_phys) : "memory");
    vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
    vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
    vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
    sgdt(&desc_table_ptr);
    vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    sidt(&desc_table_ptr);
    vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
    save->efer = rdmsr(MSR_EFER);
    save->cr4 = read_cr4();
    save->cr3 = read_cr3();
    save->cr0 = read_cr0();
    save->dr7 = read_dr7();
    save->dr6 = read_dr6();
    save->cr2 = read_cr2();
    save->g_pat = rdmsr(MSR_IA32_CR_PAT);
    save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
    /* VMRUN must always be intercepted per the AMD APM; VMMCALL is how
     * guests (test_thunk) hand control back to the host. */
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | (1ULL << INTERCEPT_VMMCALL);
}
    /* Host-side setup hook — presumably invoked before the first VMRUN
     * (the call site is in the part of test_run not visible here). */
    void (*prepare)(struct test *test);
    /* Code executed inside the guest, called from test_thunk. */
    void (*guest_func)(struct test *test);
    /* Polled by test_run's loop; true ends the VMRUN iterations. */
    bool (*finished)(struct test *test);
    /* Final pass/fail verdict, evaluated once the loop has ended. */
    bool (*succeeded)(struct test *test);
/*
 * Guest entry point: run the test's guest body, then issue VMMCALL —
 * which vmcb_ident intercepts — to force a #VMEXIT back to the host.
 */
static void test_thunk(struct test *test)
{
    test->guest_func(test);
    asm volatile ("vmmcall" : : : "memory");
}
/*
 * Run one test: point the guest at test_thunk with a private stack,
 * loop on VMRUN until test->finished() says to stop, then print the
 * PASS/FAIL verdict from test->succeeded().
 *
 * NOTE(review): several statements of this function are not visible in
 * this chunk (local declarations, the CLGI/VMLOAD/VMRUN asm body, loop
 * braces, the return) — only the visible lines are annotated.
 */
static bool test_run(struct test *test, struct vmcb *vmcb)
{
    u64 vmcb_phys = virt_to_phys(vmcb);
    /* Host-allocated stack the guest runs on. */
    u64 guest_stack[10000];
    /* ... lines elided in this view ... */
    /* Guest starts in test_thunk, which dispatches to guest_func. */
    vmcb->save.rip = (ulong)test_thunk;
    /* Stack grows down: start just past the end of the array. */
    vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
    /* ... VMRUN asm statement body elided in this view ... */
        : : "a"(vmcb_phys), "D"(test)
        : "rbx", "rcx", "rdx", "rsi",
          "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
    /* ... lines elided in this view ... */
    } while (!test->finished(test));
    /* ... lines elided in this view ... */
    success = test->succeeded(test);
    /* ... lines elided in this view ... */
    printf("%s: %s\n", test->name, success ? "PASS" : "FAIL");
110 static void default_prepare(struct test
*test
)
112 vmcb_ident(test
->vmcb
);
/* Default completion check: finish after the first vmexit. */
static bool default_finished(struct test *test)
{
    return true; /* one vmexit */
}
/* Guest body that does nothing: test_thunk's VMMCALL exits immediately. */
static void null_test(struct test *test)
{
}
124 static bool null_check(struct test
*test
)
126 return test
->vmcb
->control
.exit_code
== SVM_EXIT_VMMCALL
;
129 static void prepare_no_vmrun_int(struct test
*test
)
131 test
->vmcb
->control
.intercept
&= ~(1ULL << INTERCEPT_VMRUN
);
134 static bool check_no_vmrun_int(struct test
*test
)
136 return test
->vmcb
->control
.exit_code
== SVM_EXIT_ERR
;
/*
 * Guest body that executes VMRUN itself; since vmcb_ident sets the
 * VMRUN intercept, this is expected to exit with SVM_EXIT_VMRUN
 * (verified by check_vmrun).
 */
static void test_vmrun(struct test *test)
{
    asm volatile ("vmrun" : : "a"(virt_to_phys(test->vmcb)));
}
144 static bool check_vmrun(struct test
*test
)
146 return test
->vmcb
->control
.exit_code
== SVM_EXIT_VMRUN
;
/*
 * Test table.  Initializer order: name string, then the callbacks in
 * struct test declaration order (prepare, guest_func, finished,
 * succeeded).
 */
static struct test tests[] = {
    { "null", default_prepare, null_test, default_finished, null_check },
    { "vmrun", default_prepare, test_vmrun, default_finished, check_vmrun },
    { "vmrun intercept check", prepare_no_vmrun_int, null_test,
      default_finished, check_no_vmrun_int },
};
/*
 * Entry point: bail out if the CPU lacks SVM, otherwise run every test
 * in tests[] and print a summary; exit status 0 only if all passed.
 *
 * NOTE(review): several lines are not visible in this chunk (local
 * declarations, SVM/VMCB setup, braces, the early return on missing
 * SVM) — only the visible lines are annotated.
 */
int main(int ac, char **av)
{
    /* ... lines elided in this view ... */
    /* CPUID 0x80000001 ECX bit 2 advertises SVM support. */
    if (!(cpuid(0x80000001).c & 4)) {
        /* NOTE(review): typo "availble" in this user-visible message —
         * left unchanged here since it is runtime output. */
        printf("SVM not availble\n");
    /* ... lines elided in this view ... */
    nr = ARRAY_SIZE(tests);
    /* ... lines elided in this view ... */
    for (i = 0; i < nr; ++i) {
        passed += test_run(&tests[i], vmcb);
    /* ... lines elided in this view ... */
    printf("\nSUMMARY: %d TESTS, %d FAILURES\n", nr, (nr - passed));
    return passed == nr ? 0 : 1;