8 static void setup_svm(void)
10 void *hsave
= alloc_page();
12 wrmsr(MSR_VM_HSAVE_PA
, virt_to_phys(hsave
));
13 wrmsr(MSR_EFER
, rdmsr(MSR_EFER
) | EFER_SVME
);
16 static void vmcb_set_seg(struct vmcb_seg
*seg
, u16 selector
,
17 u64 base
, u32 limit
, u32 attr
)
19 seg
->selector
= selector
;
25 static void vmcb_ident(struct vmcb
*vmcb
)
27 u64 vmcb_phys
= virt_to_phys(vmcb
);
28 struct vmcb_save_area
*save
= &vmcb
->save
;
29 struct vmcb_control_area
*ctrl
= &vmcb
->control
;
30 u32 data_seg_attr
= 3 | SVM_SELECTOR_S_MASK
| SVM_SELECTOR_P_MASK
31 | SVM_SELECTOR_DB_MASK
| SVM_SELECTOR_G_MASK
;
32 u32 code_seg_attr
= 9 | SVM_SELECTOR_S_MASK
| SVM_SELECTOR_P_MASK
33 | SVM_SELECTOR_L_MASK
| SVM_SELECTOR_G_MASK
;
34 struct descriptor_table_ptr desc_table_ptr
;
36 memset(vmcb
, 0, sizeof(*vmcb
));
37 asm volatile ("vmsave" : : "a"(vmcb_phys
) : "memory");
38 vmcb_set_seg(&save
->es
, read_es(), 0, -1U, data_seg_attr
);
39 vmcb_set_seg(&save
->cs
, read_cs(), 0, -1U, code_seg_attr
);
40 vmcb_set_seg(&save
->ss
, read_ss(), 0, -1U, data_seg_attr
);
41 vmcb_set_seg(&save
->ds
, read_ds(), 0, -1U, data_seg_attr
);
42 sgdt(&desc_table_ptr
);
43 vmcb_set_seg(&save
->gdtr
, 0, desc_table_ptr
.base
, desc_table_ptr
.limit
, 0);
44 sidt(&desc_table_ptr
);
45 vmcb_set_seg(&save
->idtr
, 0, desc_table_ptr
.base
, desc_table_ptr
.limit
, 0);
48 save
->efer
= rdmsr(MSR_EFER
);
49 save
->cr4
= read_cr4();
50 save
->cr3
= read_cr3();
51 save
->cr0
= read_cr0();
52 save
->dr7
= read_dr7();
53 save
->dr6
= read_dr6();
54 save
->cr2
= read_cr2();
55 save
->g_pat
= rdmsr(MSR_IA32_CR_PAT
);
56 save
->dbgctl
= rdmsr(MSR_IA32_DEBUGCTLMSR
);
57 ctrl
->intercept
= (1ULL << INTERCEPT_VMRUN
) | (1ULL << INTERCEPT_VMMCALL
);
62 bool (*supported
)(void);
63 void (*prepare
)(struct test
*test
);
64 void (*guest_func
)(struct test
*test
);
65 bool (*finished
)(struct test
*test
);
66 bool (*succeeded
)(struct test
*test
);
72 static void test_thunk(struct test
*test
)
74 test
->guest_func(test
);
75 asm volatile ("vmmcall" : : : "memory");
78 static bool test_run(struct test
*test
, struct vmcb
*vmcb
)
80 u64 vmcb_phys
= virt_to_phys(vmcb
);
81 u64 guest_stack
[10000];
86 vmcb
->save
.rip
= (ulong
)test_thunk
;
87 vmcb
->save
.rsp
= (ulong
)(guest_stack
+ ARRAY_SIZE(guest_stack
));
99 : : "a"(vmcb_phys
), "D"(test
)
100 : "rbx", "rcx", "rdx", "rsi",
101 "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
104 } while (!test
->finished(test
));
106 success
= test
->succeeded(test
);
108 printf("%s: %s\n", test
->name
, success
? "PASS" : "FAIL");
113 static bool default_supported(void)
118 static void default_prepare(struct test
*test
)
120 vmcb_ident(test
->vmcb
);
124 static bool default_finished(struct test
*test
)
126 return true; /* one vmexit */
/* Guest body that does nothing: the thunk's VMMCALL is the whole test. */
static void null_test(struct test *test)
{
}
133 static bool null_check(struct test
*test
)
135 return test
->vmcb
->control
.exit_code
== SVM_EXIT_VMMCALL
;
138 static void prepare_no_vmrun_int(struct test
*test
)
140 test
->vmcb
->control
.intercept
&= ~(1ULL << INTERCEPT_VMRUN
);
143 static bool check_no_vmrun_int(struct test
*test
)
145 return test
->vmcb
->control
.exit_code
== SVM_EXIT_ERR
;
148 static void test_vmrun(struct test
*test
)
150 asm volatile ("vmrun" : : "a"(virt_to_phys(test
->vmcb
)));
153 static bool check_vmrun(struct test
*test
)
155 return test
->vmcb
->control
.exit_code
== SVM_EXIT_VMRUN
;
158 static void prepare_cr3_intercept(struct test
*test
)
160 default_prepare(test
);
161 test
->vmcb
->control
.intercept_cr_read
|= 1 << 3;
164 static void test_cr3_intercept(struct test
*test
)
166 asm volatile ("mov %%cr3, %0" : "=r"(test
->scratch
) : : "memory");
169 static bool check_cr3_intercept(struct test
*test
)
171 return test
->vmcb
->control
.exit_code
== SVM_EXIT_READ_CR3
;
174 static bool check_cr3_nointercept(struct test
*test
)
176 return null_check(test
) && test
->scratch
== read_cr3();
179 static void corrupt_cr3_intercept_bypass(void *_test
)
181 struct test
*test
= _test
;
182 extern volatile u32 mmio_insn
;
184 while (!__sync_bool_compare_and_swap(&test
->scratch
, 1, 2))
189 mmio_insn
= 0x90d8200f; // mov %cr3, %rax; nop
192 static void prepare_cr3_intercept_bypass(struct test
*test
)
194 default_prepare(test
);
195 test
->vmcb
->control
.intercept_cr_read
|= 1 << 3;
196 on_cpu_async(1, corrupt_cr3_intercept_bypass
, test
);
199 static void test_cr3_intercept_bypass(struct test
*test
)
204 while (test
->scratch
!= 2)
207 asm volatile ("mmio_insn: mov %0, (%0); nop"
208 : "+a"(a
) : : "memory");
212 static struct test tests
[] = {
213 { "null", default_supported
, default_prepare
, null_test
,
214 default_finished
, null_check
},
215 { "vmrun", default_supported
, default_prepare
, test_vmrun
,
216 default_finished
, check_vmrun
},
217 { "vmrun intercept check", default_supported
, prepare_no_vmrun_int
,
218 null_test
, default_finished
, check_no_vmrun_int
},
219 { "cr3 read intercept", default_supported
, prepare_cr3_intercept
,
220 test_cr3_intercept
, default_finished
, check_cr3_intercept
},
221 { "cr3 read nointercept", default_supported
, default_prepare
,
222 test_cr3_intercept
, default_finished
, check_cr3_nointercept
},
223 { "cr3 read intercept emulate", default_supported
,
224 prepare_cr3_intercept_bypass
, test_cr3_intercept_bypass
,
225 default_finished
, check_cr3_intercept
},
228 int main(int ac
, char **av
)
230 int i
, nr
, passed
, done
;
236 if (!(cpuid(0x80000001).c
& 4)) {
237 printf("SVM not availble\n");
245 nr
= ARRAY_SIZE(tests
);
247 for (i
= 0; i
< nr
; ++i
) {
248 if (!tests
[i
].supported())
251 passed
+= test_run(&tests
[i
], vmcb
);
254 printf("\nSUMMARY: %d TESTS, %d FAILURES\n", done
, (done
- passed
));
255 return passed
== done
? 0 : 1;