target/i386/whpx-all.c
1 /*
2 * QEMU Windows Hypervisor Platform accelerator (WHPX)
4 * Copyright Microsoft Corp. 2017
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
9 */
11 #include "qemu/osdep.h"
12 #include "cpu.h"
13 #include "exec/address-spaces.h"
14 #include "exec/ioport.h"
15 #include "qemu-common.h"
16 #include "sysemu/accel.h"
17 #include "sysemu/whpx.h"
18 #include "sysemu/sysemu.h"
19 #include "sysemu/cpus.h"
20 #include "qemu/main-loop.h"
21 #include "qemu/error-report.h"
22 #include "qapi/error.h"
23 #include "migration/blocker.h"
24 #include "whp-dispatch.h"
26 #include <WinHvPlatform.h>
27 #include <WinHvEmulation.h>
29 struct whpx_state {
30 uint64_t mem_quota;
31 WHV_PARTITION_HANDLE partition;
32 };
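/*
 * Note: the order of whpx_register_names below is load-bearing.
 * whpx_set_registers() and whpx_get_registers() walk this array sequentially
 * and assert the expected register name at each index, so the entries must
 * stay in sync with that code.
 */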
34 static const WHV_REGISTER_NAME whpx_register_names[] = {
36 /* X64 General purpose registers */
37 WHvX64RegisterRax,
38 WHvX64RegisterRcx,
39 WHvX64RegisterRdx,
40 WHvX64RegisterRbx,
41 WHvX64RegisterRsp,
42 WHvX64RegisterRbp,
43 WHvX64RegisterRsi,
44 WHvX64RegisterRdi,
45 WHvX64RegisterR8,
46 WHvX64RegisterR9,
47 WHvX64RegisterR10,
48 WHvX64RegisterR11,
49 WHvX64RegisterR12,
50 WHvX64RegisterR13,
51 WHvX64RegisterR14,
52 WHvX64RegisterR15,
53 WHvX64RegisterRip,
54 WHvX64RegisterRflags,
56 /* X64 Segment registers */
57 WHvX64RegisterEs,
58 WHvX64RegisterCs,
59 WHvX64RegisterSs,
60 WHvX64RegisterDs,
61 WHvX64RegisterFs,
62 WHvX64RegisterGs,
63 WHvX64RegisterLdtr,
64 WHvX64RegisterTr,
66 /* X64 Table registers */
67 WHvX64RegisterIdtr,
68 WHvX64RegisterGdtr,
70 /* X64 Control Registers */
71 WHvX64RegisterCr0,
72 WHvX64RegisterCr2,
73 WHvX64RegisterCr3,
74 WHvX64RegisterCr4,
75 WHvX64RegisterCr8,
77 /* X64 Debug Registers */
78 /*
79 * WHvX64RegisterDr0,
80 * WHvX64RegisterDr1,
81 * WHvX64RegisterDr2,
82 * WHvX64RegisterDr3,
83 * WHvX64RegisterDr6,
84 * WHvX64RegisterDr7,
85 */
87 /* X64 Floating Point and Vector Registers */
88 WHvX64RegisterXmm0,
89 WHvX64RegisterXmm1,
90 WHvX64RegisterXmm2,
91 WHvX64RegisterXmm3,
92 WHvX64RegisterXmm4,
93 WHvX64RegisterXmm5,
94 WHvX64RegisterXmm6,
95 WHvX64RegisterXmm7,
96 WHvX64RegisterXmm8,
97 WHvX64RegisterXmm9,
98 WHvX64RegisterXmm10,
99 WHvX64RegisterXmm11,
100 WHvX64RegisterXmm12,
101 WHvX64RegisterXmm13,
102 WHvX64RegisterXmm14,
103 WHvX64RegisterXmm15,
104 WHvX64RegisterFpMmx0,
105 WHvX64RegisterFpMmx1,
106 WHvX64RegisterFpMmx2,
107 WHvX64RegisterFpMmx3,
108 WHvX64RegisterFpMmx4,
109 WHvX64RegisterFpMmx5,
110 WHvX64RegisterFpMmx6,
111 WHvX64RegisterFpMmx7,
112 WHvX64RegisterFpControlStatus,
113 WHvX64RegisterXmmControlStatus,
115 /* X64 MSRs */
116 WHvX64RegisterTsc,
117 WHvX64RegisterEfer,
118 #ifdef TARGET_X86_64
119 WHvX64RegisterKernelGsBase,
120 #endif
121 WHvX64RegisterApicBase,
122 /* WHvX64RegisterPat, */
123 WHvX64RegisterSysenterCs,
124 WHvX64RegisterSysenterEip,
125 WHvX64RegisterSysenterEsp,
126 WHvX64RegisterStar,
127 #ifdef TARGET_X86_64
128 WHvX64RegisterLstar,
129 WHvX64RegisterCstar,
130 WHvX64RegisterSfmask,
131 #endif
133 /* Interrupt / Event Registers */
134 /*
135 * WHvRegisterPendingInterruption,
136 * WHvRegisterInterruptState,
137 * WHvRegisterPendingEvent0,
138 * WHvRegisterPendingEvent1
139 * WHvX64RegisterDeliverabilityNotifications,
140 */
141 };
143 struct whpx_register_set {
144 WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)];
145 };
147 struct whpx_vcpu {
148 WHV_EMULATOR_HANDLE emulator;
149 bool window_registered;
150 bool interruptable;
151 uint64_t tpr;
152 uint64_t apic_base;
153 bool interruption_pending;
155 /* Must be the last field as it may have a tail */
156 WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
157 };
159 static bool whpx_allowed;
160 static bool whp_dispatch_initialized;
161 static HMODULE hWinHvPlatform, hWinHvEmulation;
163 struct whpx_state whpx_global;
164 struct WHPDispatch whp_dispatch;
167 /*
168 * VP support
169 */
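/*
 * The per-vCPU WHPX state is stored in CPUState::hax_vcpu (the pointer slot
 * shared with the HAX accelerator); get_whpx_vcpu() casts it back, and
 * whpx_init_vcpu() is where it is allocated and installed.
 */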
171 static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
173 return (struct whpx_vcpu *)cpu->hax_vcpu;
176 static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
177 int r86)
179 WHV_X64_SEGMENT_REGISTER hs;
180 unsigned flags = qs->flags;
182 hs.Base = qs->base;
183 hs.Limit = qs->limit;
184 hs.Selector = qs->selector;
186 if (v86) {
187 hs.Attributes = 0;
188 hs.SegmentType = 3;
189 hs.Present = 1;
190 hs.DescriptorPrivilegeLevel = 3;
191 hs.NonSystemSegment = 1;
193 } else {
194 hs.Attributes = (flags >> DESC_TYPE_SHIFT);
196 if (r86) {
197 /* hs.Base &= 0xfffff; */
201 return hs;
204 static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
206 SegmentCache qs;
208 qs.base = hs->Base;
209 qs.limit = hs->Limit;
210 qs.selector = hs->Selector;
212 qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;
214 return qs;
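/*
 * whpx_set_registers() pushes the complete QEMU CPU state (GPRs, segments,
 * control registers, FPU/XMM state and a subset of MSRs) into the partition
 * with a single WHvSetVirtualProcessorRegisters() call, filling the value
 * array in exactly the order defined by whpx_register_names.
 */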
217 static void whpx_set_registers(CPUState *cpu)
219 struct whpx_state *whpx = &whpx_global;
220 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
221 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
222 X86CPU *x86_cpu = X86_CPU(cpu);
223 struct whpx_register_set vcxt;
224 HRESULT hr;
225 int idx;
226 int idx_next;
227 int i;
228 int v86, r86;
230 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
232 memset(&vcxt, 0, sizeof(struct whpx_register_set));
234 v86 = (env->eflags & VM_MASK);
235 r86 = !(env->cr[0] & CR0_PE_MASK);
237 vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
238 vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);
240 idx = 0;
242 /* Indexes for first 16 registers match between HV and QEMU definitions */
243 idx_next = 16;
244 for (idx = 0; idx < CPU_NB_REGS; idx += 1) {
245 vcxt.values[idx].Reg64 = (uint64_t)env->regs[idx];
247 idx = idx_next;
249 /* Same goes for RIP and RFLAGS */
250 assert(whpx_register_names[idx] == WHvX64RegisterRip);
251 vcxt.values[idx++].Reg64 = env->eip;
253 assert(whpx_register_names[idx] == WHvX64RegisterRflags);
254 vcxt.values[idx++].Reg64 = env->eflags;
256 /* Translate 6+4 segment registers. HV and QEMU order matches */
257 assert(idx == WHvX64RegisterEs);
258 for (i = 0; i < 6; i += 1, idx += 1) {
259 vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
262 assert(idx == WHvX64RegisterLdtr);
263 vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);
265 assert(idx == WHvX64RegisterTr);
266 vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);
268 assert(idx == WHvX64RegisterIdtr);
269 vcxt.values[idx].Table.Base = env->idt.base;
270 vcxt.values[idx].Table.Limit = env->idt.limit;
271 idx += 1;
273 assert(idx == WHvX64RegisterGdtr);
274 vcxt.values[idx].Table.Base = env->gdt.base;
275 vcxt.values[idx].Table.Limit = env->gdt.limit;
276 idx += 1;
278 /* CR0, 2, 3, 4, 8 */
279 assert(whpx_register_names[idx] == WHvX64RegisterCr0);
280 vcxt.values[idx++].Reg64 = env->cr[0];
281 assert(whpx_register_names[idx] == WHvX64RegisterCr2);
282 vcxt.values[idx++].Reg64 = env->cr[2];
283 assert(whpx_register_names[idx] == WHvX64RegisterCr3);
284 vcxt.values[idx++].Reg64 = env->cr[3];
285 assert(whpx_register_names[idx] == WHvX64RegisterCr4);
286 vcxt.values[idx++].Reg64 = env->cr[4];
287 assert(whpx_register_names[idx] == WHvX64RegisterCr8);
288 vcxt.values[idx++].Reg64 = vcpu->tpr;
290 /* 8 Debug Registers - Skipped */
292 /* 16 XMM registers */
293 assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
294 idx_next = idx + 16;
295 for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
296 vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
297 vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
299 idx = idx_next;
301 /* 8 FP registers */
302 assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
303 for (i = 0; i < 8; i += 1, idx += 1) {
304 vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
305 /* vcxt.values[idx].Fp.AsUINT128.High64 =
306 env->fpregs[i].mmx.MMX_Q(1);
307 */
310 /* FP control status register */
311 assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
312 vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
313 vcxt.values[idx].FpControlStatus.FpStatus =
314 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
315 vcxt.values[idx].FpControlStatus.FpTag = 0;
316 for (i = 0; i < 8; ++i) {
317 vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
319 vcxt.values[idx].FpControlStatus.Reserved = 0;
320 vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
321 vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
322 idx += 1;
324 /* XMM control status register */
325 assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
326 vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
327 vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
328 vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;
329 idx += 1;
331 /* MSRs */
332 assert(whpx_register_names[idx] == WHvX64RegisterTsc);
333 vcxt.values[idx++].Reg64 = env->tsc;
334 assert(whpx_register_names[idx] == WHvX64RegisterEfer);
335 vcxt.values[idx++].Reg64 = env->efer;
336 #ifdef TARGET_X86_64
337 assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
338 vcxt.values[idx++].Reg64 = env->kernelgsbase;
339 #endif
341 assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
342 vcxt.values[idx++].Reg64 = vcpu->apic_base;
344 /* WHvX64RegisterPat - Skipped */
346 assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
347 vcxt.values[idx++].Reg64 = env->sysenter_cs;
348 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
349 vcxt.values[idx++].Reg64 = env->sysenter_eip;
350 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
351 vcxt.values[idx++].Reg64 = env->sysenter_esp;
352 assert(whpx_register_names[idx] == WHvX64RegisterStar);
353 vcxt.values[idx++].Reg64 = env->star;
354 #ifdef TARGET_X86_64
355 assert(whpx_register_names[idx] == WHvX64RegisterLstar);
356 vcxt.values[idx++].Reg64 = env->lstar;
357 assert(whpx_register_names[idx] == WHvX64RegisterCstar);
358 vcxt.values[idx++].Reg64 = env->cstar;
359 assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
360 vcxt.values[idx++].Reg64 = env->fmask;
361 #endif
363 /* Interrupt / Event Registers - Skipped */
365 assert(idx == RTL_NUMBER_OF(whpx_register_names));
367 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
368 whpx->partition, cpu->cpu_index,
369 whpx_register_names,
370 RTL_NUMBER_OF(whpx_register_names),
371 &vcxt.values[0]);
373 if (FAILED(hr)) {
374 error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
375 hr);
378 return;
381 static void whpx_get_registers(CPUState *cpu)
383 struct whpx_state *whpx = &whpx_global;
384 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
385 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
386 X86CPU *x86_cpu = X86_CPU(cpu);
387 struct whpx_register_set vcxt;
388 uint64_t tpr, apic_base;
389 HRESULT hr;
390 int idx;
391 int idx_next;
392 int i;
394 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
396 hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
397 whpx->partition, cpu->cpu_index,
398 whpx_register_names,
399 RTL_NUMBER_OF(whpx_register_names),
400 &vcxt.values[0]);
401 if (FAILED(hr)) {
402 error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
403 hr);
406 idx = 0;
408 /* Indexes for first 16 registers match between HV and QEMU definitions */
409 idx_next = 16;
410 for (idx = 0; idx < CPU_NB_REGS; idx += 1) {
411 env->regs[idx] = vcxt.values[idx].Reg64;
413 idx = idx_next;
415 /* Same goes for RIP and RFLAGS */
416 assert(whpx_register_names[idx] == WHvX64RegisterRip);
417 env->eip = vcxt.values[idx++].Reg64;
418 assert(whpx_register_names[idx] == WHvX64RegisterRflags);
419 env->eflags = vcxt.values[idx++].Reg64;
421 /* Translate 6+4 segment registers. HV and QEMU order matches */
422 assert(idx == WHvX64RegisterEs);
423 for (i = 0; i < 6; i += 1, idx += 1) {
424 env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
427 assert(idx == WHvX64RegisterLdtr);
428 env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
429 assert(idx == WHvX64RegisterTr);
430 env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
431 assert(idx == WHvX64RegisterIdtr);
432 env->idt.base = vcxt.values[idx].Table.Base;
433 env->idt.limit = vcxt.values[idx].Table.Limit;
434 idx += 1;
435 assert(idx == WHvX64RegisterGdtr);
436 env->gdt.base = vcxt.values[idx].Table.Base;
437 env->gdt.limit = vcxt.values[idx].Table.Limit;
438 idx += 1;
440 /* CR0, 2, 3, 4, 8 */
441 assert(whpx_register_names[idx] == WHvX64RegisterCr0);
442 env->cr[0] = vcxt.values[idx++].Reg64;
443 assert(whpx_register_names[idx] == WHvX64RegisterCr2);
444 env->cr[2] = vcxt.values[idx++].Reg64;
445 assert(whpx_register_names[idx] == WHvX64RegisterCr3);
446 env->cr[3] = vcxt.values[idx++].Reg64;
447 assert(whpx_register_names[idx] == WHvX64RegisterCr4);
448 env->cr[4] = vcxt.values[idx++].Reg64;
449 assert(whpx_register_names[idx] == WHvX64RegisterCr8);
450 tpr = vcxt.values[idx++].Reg64;
451 if (tpr != vcpu->tpr) {
452 vcpu->tpr = tpr;
453 cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
456 /* 8 Debug Registers - Skipped */
458 /* 16 XMM registers */
459 assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
460 idx_next = idx + 16;
461 for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
462 env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
463 env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
465 idx = idx_next;
467 /* 8 FP registers */
468 assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
469 for (i = 0; i < 8; i += 1, idx += 1) {
470 env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
471 /* env->fpregs[i].mmx.MMX_Q(1) =
472 vcxt.values[idx].Fp.AsUINT128.High64;
473 */
476 /* FP control status register */
477 assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
478 env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
479 env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
480 env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
481 for (i = 0; i < 8; ++i) {
482 env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
484 env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
485 env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
486 idx += 1;
488 /* XMM control status register */
489 assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
490 env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
491 idx += 1;
493 /* MSRs */
494 assert(whpx_register_names[idx] == WHvX64RegisterTsc);
495 env->tsc = vcxt.values[idx++].Reg64;
496 assert(whpx_register_names[idx] == WHvX64RegisterEfer);
497 env->efer = vcxt.values[idx++].Reg64;
498 #ifdef TARGET_X86_64
499 assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
500 env->kernelgsbase = vcxt.values[idx++].Reg64;
501 #endif
503 assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
504 apic_base = vcxt.values[idx++].Reg64;
505 if (apic_base != vcpu->apic_base) {
506 vcpu->apic_base = apic_base;
507 cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
510 /* WHvX64RegisterPat - Skipped */
512 assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
513 env->sysenter_cs = vcxt.values[idx++].Reg64;
514 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
515 env->sysenter_eip = vcxt.values[idx++].Reg64;
516 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
517 env->sysenter_esp = vcxt.values[idx++].Reg64;
518 assert(whpx_register_names[idx] == WHvX64RegisterStar);
519 env->star = vcxt.values[idx++].Reg64;
520 #ifdef TARGET_X86_64
521 assert(whpx_register_names[idx] == WHvX64RegisterLstar);
522 env->lstar = vcxt.values[idx++].Reg64;
523 assert(whpx_register_names[idx] == WHvX64RegisterCstar);
524 env->cstar = vcxt.values[idx++].Reg64;
525 assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
526 env->fmask = vcxt.values[idx++].Reg64;
527 #endif
529 /* Interrupt / Event Registers - Skipped */
531 assert(idx == RTL_NUMBER_OF(whpx_register_names));
533 return;
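/*
 * The callbacks below are handed to WHvEmulatorCreateEmulator() (see
 * whpx_init_vcpu) and are invoked by the WinHvEmulation instruction emulator
 * while it completes MMIO and port-I/O accesses on behalf of the guest.
 */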
536 static HRESULT CALLBACK whpx_emu_ioport_callback(
537 void *ctx,
538 WHV_EMULATOR_IO_ACCESS_INFO *IoAccess)
540 MemTxAttrs attrs = { 0 };
541 address_space_rw(&address_space_io, IoAccess->Port, attrs,
542 (uint8_t *)&IoAccess->Data, IoAccess->AccessSize,
543 IoAccess->Direction);
544 return S_OK;
547 static HRESULT CALLBACK whpx_emu_mmio_callback(
548 void *ctx,
549 WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
551 cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
552 ma->Direction);
553 return S_OK;
556 static HRESULT CALLBACK whpx_emu_getreg_callback(
557 void *ctx,
558 const WHV_REGISTER_NAME *RegisterNames,
559 UINT32 RegisterCount,
560 WHV_REGISTER_VALUE *RegisterValues)
562 HRESULT hr;
563 struct whpx_state *whpx = &whpx_global;
564 CPUState *cpu = (CPUState *)ctx;
566 hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
567 whpx->partition, cpu->cpu_index,
568 RegisterNames, RegisterCount,
569 RegisterValues);
570 if (FAILED(hr)) {
571 error_report("WHPX: Failed to get virtual processor registers,"
572 " hr=%08lx", hr);
575 return hr;
578 static HRESULT CALLBACK whpx_emu_setreg_callback(
579 void *ctx,
580 const WHV_REGISTER_NAME *RegisterNames,
581 UINT32 RegisterCount,
582 const WHV_REGISTER_VALUE *RegisterValues)
584 HRESULT hr;
585 struct whpx_state *whpx = &whpx_global;
586 CPUState *cpu = (CPUState *)ctx;
588 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
589 whpx->partition, cpu->cpu_index,
590 RegisterNames, RegisterCount,
591 RegisterValues);
592 if (FAILED(hr)) {
593 error_report("WHPX: Failed to set virtual processor registers,"
594 " hr=%08lx", hr);
597 /*
598 * The emulator just successfully wrote the register state. We clear the
599 * dirty state so we avoid the double write on resume of the VP.
600 */
601 cpu->vcpu_dirty = false;
603 return hr;
606 static HRESULT CALLBACK whpx_emu_translate_callback(
607 void *ctx,
608 WHV_GUEST_VIRTUAL_ADDRESS Gva,
609 WHV_TRANSLATE_GVA_FLAGS TranslateFlags,
610 WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult,
611 WHV_GUEST_PHYSICAL_ADDRESS *Gpa)
613 HRESULT hr;
614 struct whpx_state *whpx = &whpx_global;
615 CPUState *cpu = (CPUState *)ctx;
616 WHV_TRANSLATE_GVA_RESULT res;
618 hr = whp_dispatch.WHvTranslateGva(whpx->partition, cpu->cpu_index,
619 Gva, TranslateFlags, &res, Gpa);
620 if (FAILED(hr)) {
621 error_report("WHPX: Failed to translate GVA, hr=%08lx", hr);
622 } else {
623 *TranslationResult = res.ResultCode;
626 return hr;
629 static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = {
630 .Size = sizeof(WHV_EMULATOR_CALLBACKS),
631 .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
632 .WHvEmulatorMemoryCallback = whpx_emu_mmio_callback,
633 .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
634 .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
635 .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
636 };
638 static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx)
640 HRESULT hr;
641 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
642 WHV_EMULATOR_STATUS emu_status;
644 hr = whp_dispatch.WHvEmulatorTryMmioEmulation(
645 vcpu->emulator, cpu,
646 &vcpu->exit_ctx.VpContext, ctx,
647 &emu_status);
648 if (FAILED(hr)) {
649 error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr);
650 return -1;
653 if (!emu_status.EmulationSuccessful) {
654 error_report("WHPX: Failed to emulate MMIO access with"
655 " EmulatorReturnStatus: %u", emu_status.AsUINT32);
656 return -1;
659 return 0;
662 static int whpx_handle_portio(CPUState *cpu,
663 WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx)
665 HRESULT hr;
666 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
667 WHV_EMULATOR_STATUS emu_status;
669 hr = whp_dispatch.WHvEmulatorTryIoEmulation(
670 vcpu->emulator, cpu,
671 &vcpu->exit_ctx.VpContext, ctx,
672 &emu_status);
673 if (FAILED(hr)) {
674 error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr);
675 return -1;
678 if (!emu_status.EmulationSuccessful) {
679 error_report("WHPX: Failed to emulate PortIO access with"
680 " EmulatorReturnStatus: %u", emu_status.AsUINT32);
681 return -1;
684 return 0;
687 static int whpx_handle_halt(CPUState *cpu)
689 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
690 int ret = 0;
692 qemu_mutex_lock_iothread();
693 if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
694 (env->eflags & IF_MASK)) &&
695 !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
696 cpu->exception_index = EXCP_HLT;
697 cpu->halted = true;
698 ret = 1;
700 qemu_mutex_unlock_iothread();
702 return ret;
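/*
 * whpx_vcpu_pre_run() is called right before the vCPU is resumed: under the
 * iothread lock it prepares a pending NMI or PIC interrupt for injection,
 * mirrors the APIC TPR into CR8 and, when an interrupt cannot be delivered
 * yet, requests an interrupt-window notification; the collected registers
 * are then written with WHvSetVirtualProcessorRegisters().
 */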
705 static void whpx_vcpu_pre_run(CPUState *cpu)
707 HRESULT hr;
708 struct whpx_state *whpx = &whpx_global;
709 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
710 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
711 X86CPU *x86_cpu = X86_CPU(cpu);
712 int irq;
713 uint8_t tpr;
714 WHV_X64_PENDING_INTERRUPTION_REGISTER new_int;
715 UINT32 reg_count = 0;
716 WHV_REGISTER_VALUE reg_values[3];
717 WHV_REGISTER_NAME reg_names[3];
719 memset(&new_int, 0, sizeof(new_int));
720 memset(reg_values, 0, sizeof(reg_values));
722 qemu_mutex_lock_iothread();
724 /* Inject NMI */
725 if (!vcpu->interruption_pending &&
726 cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
727 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
728 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
729 vcpu->interruptable = false;
730 new_int.InterruptionType = WHvX64PendingNmi;
731 new_int.InterruptionPending = 1;
732 new_int.InterruptionVector = 2;
734 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
735 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
739 /*
740 * Force the VCPU out of its inner loop to process any INIT requests or
741 * commit pending TPR access.
742 */
743 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
744 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
745 !(env->hflags & HF_SMM_MASK)) {
746 cpu->exit_request = 1;
748 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
749 cpu->exit_request = 1;
753 /* Get pending hard interruption or replay one that was overwritten */
754 if (!vcpu->interruption_pending &&
755 vcpu->interruptable && (env->eflags & IF_MASK)) {
756 assert(!new_int.InterruptionPending);
757 if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
758 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
759 irq = cpu_get_pic_interrupt(env);
760 if (irq >= 0) {
761 new_int.InterruptionType = WHvX64PendingInterrupt;
762 new_int.InterruptionPending = 1;
763 new_int.InterruptionVector = irq;
768 /* Set up interrupt state if a new one was prepared */
769 if (new_int.InterruptionPending) {
770 reg_values[reg_count].PendingInterruption = new_int;
771 reg_names[reg_count] = WHvRegisterPendingInterruption;
772 reg_count += 1;
775 /* Sync the TPR to the CR8 if it was modified during the intercept */
776 tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
777 if (tpr != vcpu->tpr) {
778 vcpu->tpr = tpr;
779 reg_values[reg_count].Reg64 = tpr;
780 cpu->exit_request = 1;
781 reg_names[reg_count] = WHvX64RegisterCr8;
782 reg_count += 1;
785 /* Update the state of the interrupt delivery notification */
786 if (!vcpu->window_registered &&
787 cpu->interrupt_request & CPU_INTERRUPT_HARD) {
788 reg_values[reg_count].DeliverabilityNotifications.InterruptNotification
789 = 1;
790 vcpu->window_registered = 1;
791 reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;
792 reg_count += 1;
795 qemu_mutex_unlock_iothread();
797 if (reg_count) {
798 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
799 whpx->partition, cpu->cpu_index,
800 reg_names, reg_count, reg_values);
801 if (FAILED(hr)) {
802 error_report("WHPX: Failed to set interrupt state registers,"
803 " hr=%08lx", hr);
807 return;
810 static void whpx_vcpu_post_run(CPUState *cpu)
812 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
813 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
814 X86CPU *x86_cpu = X86_CPU(cpu);
816 env->eflags = vcpu->exit_ctx.VpContext.Rflags;
818 uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8;
819 if (vcpu->tpr != tpr) {
820 vcpu->tpr = tpr;
821 qemu_mutex_lock_iothread();
822 cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
823 qemu_mutex_unlock_iothread();
826 vcpu->interruption_pending =
827 vcpu->exit_ctx.VpContext.ExecutionState.InterruptionPending;
829 vcpu->interruptable =
830 !vcpu->exit_ctx.VpContext.ExecutionState.InterruptShadow;
832 return;
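/*
 * whpx_vcpu_process_async_events() handles interrupt_request bits that must
 * be serviced outside the partition: INIT, SIPI, APIC poll and TPR-access
 * reports. It pulls the register state from the hypervisor first where the
 * handler needs it (SIPI, TPR) and marks the vCPU dirty after an INIT.
 */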
835 static void whpx_vcpu_process_async_events(CPUState *cpu)
837 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
838 X86CPU *x86_cpu = X86_CPU(cpu);
839 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
841 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
842 !(env->hflags & HF_SMM_MASK)) {
844 do_cpu_init(x86_cpu);
845 cpu->vcpu_dirty = true;
846 vcpu->interruptable = true;
849 if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
850 cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
851 apic_poll_irq(x86_cpu->apic_state);
854 if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
855 (env->eflags & IF_MASK)) ||
856 (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
857 cpu->halted = false;
860 if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
861 if (!cpu->vcpu_dirty) {
862 whpx_get_registers(cpu);
864 do_cpu_sipi(x86_cpu);
867 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
868 cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
869 if (!cpu->vcpu_dirty) {
870 whpx_get_registers(cpu);
872 apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
873 env->tpr_access_type);
876 return;
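/*
 * Main run loop: flush dirty register state, do pre-run interrupt setup,
 * call WHvRunVirtualProcessor() and dispatch on the exit reason. A handler
 * result of 0 re-enters the loop, 1 bounces back to the outer loop in
 * whpx_vcpu_exec(), and a negative value is treated as fatal.
 */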
879 static int whpx_vcpu_run(CPUState *cpu)
881 HRESULT hr;
882 struct whpx_state *whpx = &whpx_global;
883 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
884 int ret;
886 whpx_vcpu_process_async_events(cpu);
887 if (cpu->halted) {
888 cpu->exception_index = EXCP_HLT;
889 atomic_set(&cpu->exit_request, false);
890 return 0;
893 qemu_mutex_unlock_iothread();
894 cpu_exec_start(cpu);
896 do {
897 if (cpu->vcpu_dirty) {
898 whpx_set_registers(cpu);
899 cpu->vcpu_dirty = false;
902 whpx_vcpu_pre_run(cpu);
904 if (atomic_read(&cpu->exit_request)) {
905 whpx_vcpu_kick(cpu);
908 hr = whp_dispatch.WHvRunVirtualProcessor(
909 whpx->partition, cpu->cpu_index,
910 &vcpu->exit_ctx, sizeof(vcpu->exit_ctx));
912 if (FAILED(hr)) {
913 error_report("WHPX: Failed to exec a virtual processor,"
914 " hr=%08lx", hr);
915 ret = -1;
916 break;
919 whpx_vcpu_post_run(cpu);
921 switch (vcpu->exit_ctx.ExitReason) {
922 case WHvRunVpExitReasonMemoryAccess:
923 ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);
924 break;
926 case WHvRunVpExitReasonX64IoPortAccess:
927 ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);
928 break;
930 case WHvRunVpExitReasonX64InterruptWindow:
931 vcpu->window_registered = 0;
932 ret = 0;
933 break;
935 case WHvRunVpExitReasonX64Halt:
936 ret = whpx_handle_halt(cpu);
937 break;
939 case WHvRunVpExitReasonCanceled:
940 cpu->exception_index = EXCP_INTERRUPT;
941 ret = 1;
942 break;
944 case WHvRunVpExitReasonX64MsrAccess: {
945 WHV_REGISTER_VALUE reg_values[3] = {0};
946 WHV_REGISTER_NAME reg_names[3];
947 UINT32 reg_count;
949 reg_names[0] = WHvX64RegisterRip;
950 reg_names[1] = WHvX64RegisterRax;
951 reg_names[2] = WHvX64RegisterRdx;
953 reg_values[0].Reg64 =
954 vcpu->exit_ctx.VpContext.Rip +
955 vcpu->exit_ctx.VpContext.InstructionLength;
957 /*
958 * For all unsupported MSR access we:
959 * ignore writes
960 * return 0 on read.
961 */
962 reg_count = vcpu->exit_ctx.MsrAccess.AccessInfo.IsWrite ?
963 1 : 3;
965 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
966 whpx->partition,
967 cpu->cpu_index,
968 reg_names, reg_count,
969 reg_values);
971 if (FAILED(hr)) {
972 error_report("WHPX: Failed to set MsrAccess state"
973 " registers, hr=%08lx", hr);
975 ret = 0;
976 break;
978 case WHvRunVpExitReasonX64Cpuid: {
979 WHV_REGISTER_VALUE reg_values[5];
980 WHV_REGISTER_NAME reg_names[5];
981 UINT32 reg_count = 5;
982 UINT64 rip, rax, rcx, rdx, rbx;
984 memset(reg_values, 0, sizeof(reg_values));
986 rip = vcpu->exit_ctx.VpContext.Rip +
987 vcpu->exit_ctx.VpContext.InstructionLength;
988 switch (vcpu->exit_ctx.CpuidAccess.Rax) {
989 case 1:
990 rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax;
991 /* Advertise that we are running on a hypervisor */
992 rcx =
993 vcpu->exit_ctx.CpuidAccess.DefaultResultRcx |
994 CPUID_EXT_HYPERVISOR;
996 rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx;
997 rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx;
998 break;
999 case 0x80000001:
1000 rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax;
1001 /* Remove any support of OSVW */
1002 rcx =
1003 vcpu->exit_ctx.CpuidAccess.DefaultResultRcx &
1004 ~CPUID_EXT3_OSVW;
1006 rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx;
1007 rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx;
1008 break;
1009 default:
1010 rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax;
1011 rcx = vcpu->exit_ctx.CpuidAccess.DefaultResultRcx;
1012 rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx;
1013 rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx;
1016 reg_names[0] = WHvX64RegisterRip;
1017 reg_names[1] = WHvX64RegisterRax;
1018 reg_names[2] = WHvX64RegisterRcx;
1019 reg_names[3] = WHvX64RegisterRdx;
1020 reg_names[4] = WHvX64RegisterRbx;
1022 reg_values[0].Reg64 = rip;
1023 reg_values[1].Reg64 = rax;
1024 reg_values[2].Reg64 = rcx;
1025 reg_values[3].Reg64 = rdx;
1026 reg_values[4].Reg64 = rbx;
1028 hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
1029 whpx->partition, cpu->cpu_index,
1030 reg_names,
1031 reg_count,
1032 reg_values);
1034 if (FAILED(hr)) {
1035 error_report("WHPX: Failed to set CpuidAccess state registers,"
1036 " hr=%08lx", hr);
1038 ret = 0;
1039 break;
1041 case WHvRunVpExitReasonNone:
1042 case WHvRunVpExitReasonUnrecoverableException:
1043 case WHvRunVpExitReasonInvalidVpRegisterValue:
1044 case WHvRunVpExitReasonUnsupportedFeature:
1045 case WHvRunVpExitReasonException:
1046 default:
1047 error_report("WHPX: Unexpected VP exit code %d",
1048 vcpu->exit_ctx.ExitReason);
1049 whpx_get_registers(cpu);
1050 qemu_mutex_lock_iothread();
1051 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
1052 qemu_mutex_unlock_iothread();
1053 break;
1056 } while (!ret);
1058 cpu_exec_end(cpu);
1059 qemu_mutex_lock_iothread();
1060 current_cpu = cpu;
1062 atomic_set(&cpu->exit_request, false);
1064 return ret < 0;
1067 static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
1069 whpx_get_registers(cpu);
1070 cpu->vcpu_dirty = true;
1073 static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
1074 run_on_cpu_data arg)
1076 whpx_set_registers(cpu);
1077 cpu->vcpu_dirty = false;
1080 static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
1081 run_on_cpu_data arg)
1083 whpx_set_registers(cpu);
1084 cpu->vcpu_dirty = false;
1087 static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
1088 run_on_cpu_data arg)
1090 cpu->vcpu_dirty = true;
1093 /*
1094 * CPU support.
1095 */
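/*
 * The cpu_synchronize hooks implement the usual lazy register protocol:
 * do_whpx_cpu_synchronize_state() fetches the registers and marks the vCPU
 * dirty, and whpx_vcpu_run() writes the state back with whpx_set_registers()
 * before the next WHvRunVirtualProcessor() call whenever cpu->vcpu_dirty is
 * set.
 */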
1097 void whpx_cpu_synchronize_state(CPUState *cpu)
1099 if (!cpu->vcpu_dirty) {
1100 run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);
1104 void whpx_cpu_synchronize_post_reset(CPUState *cpu)
1106 run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
1109 void whpx_cpu_synchronize_post_init(CPUState *cpu)
1111 run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
1114 void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
1116 run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
1119 /*
1120 * Vcpu support.
1121 */
1123 static Error *whpx_migration_blocker;
1125 int whpx_init_vcpu(CPUState *cpu)
1127 HRESULT hr;
1128 struct whpx_state *whpx = &whpx_global;
1129 struct whpx_vcpu *vcpu;
1130 Error *local_error = NULL;
1132 /* Add migration blockers for all unsupported features of the
1133 * Windows Hypervisor Platform
1134 */
1135 if (whpx_migration_blocker == NULL) {
1136 error_setg(&whpx_migration_blocker,
1137 "State blocked due to non-migratable CPUID feature support, "
1138 "dirty memory tracking support, and XSAVE/XRSTOR support");
1140 (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
1141 if (local_error) {
1142 error_report_err(local_error);
1143 migrate_del_blocker(whpx_migration_blocker);
1144 error_free(whpx_migration_blocker);
1145 return -EINVAL;
1149 vcpu = g_malloc0(sizeof(struct whpx_vcpu));
1151 if (!vcpu) {
1152 error_report("WHPX: Failed to allocate VCPU context.");
1153 return -ENOMEM;
1156 hr = whp_dispatch.WHvEmulatorCreateEmulator(
1157 &whpx_emu_callbacks,
1158 &vcpu->emulator);
1159 if (FAILED(hr)) {
1160 error_report("WHPX: Failed to setup instruction completion support,"
1161 " hr=%08lx", hr);
1162 g_free(vcpu);
1163 return -EINVAL;
1166 hr = whp_dispatch.WHvCreateVirtualProcessor(
1167 whpx->partition, cpu->cpu_index, 0);
1168 if (FAILED(hr)) {
1169 error_report("WHPX: Failed to create a virtual processor,"
1170 " hr=%08lx", hr);
1171 whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
1172 g_free(vcpu);
1173 return -EINVAL;
1176 vcpu->interruptable = true;
1178 cpu->vcpu_dirty = true;
1179 cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
1181 return 0;
1184 int whpx_vcpu_exec(CPUState *cpu)
1186 int ret;
1187 int fatal;
1189 for (;;) {
1190 if (cpu->exception_index >= EXCP_INTERRUPT) {
1191 ret = cpu->exception_index;
1192 cpu->exception_index = -1;
1193 break;
1196 fatal = whpx_vcpu_run(cpu);
1198 if (fatal) {
1199 error_report("WHPX: Failed to exec a virtual processor");
1200 abort();
1204 return ret;
1207 void whpx_destroy_vcpu(CPUState *cpu)
1209 struct whpx_state *whpx = &whpx_global;
1210 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
1212 whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
1213 whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
1214 g_free(cpu->hax_vcpu);
1215 return;
1218 void whpx_vcpu_kick(CPUState *cpu)
1220 struct whpx_state *whpx = &whpx_global;
1221 whp_dispatch.WHvCancelRunVirtualProcessor(
1222 whpx->partition, cpu->cpu_index, 0);
1225 /*
1226 * Memory support.
1227 */
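/*
 * RAM is exposed to the partition through the memory listener below:
 * whpx_process_section() trims each RAM section to host-page alignment and
 * whpx_update_mapping() maps or unmaps it with WHvMapGpaRange() /
 * WHvUnmapGpaRange(), dropping write permission for ROM regions.
 */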
1229 static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size,
1230 void *host_va, int add, int rom,
1231 const char *name)
1233 struct whpx_state *whpx = &whpx_global;
1234 HRESULT hr;
1236 /*
1237 if (add) {
1238 printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
1239 (void*)start_pa, (void*)size, host_va,
1240 (rom ? "ROM" : "RAM"), name);
1241 } else {
1242 printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n",
1243 (void*)start_pa, (void*)size, host_va, name);
1244 }
1245 */
1247 if (add) {
1248 hr = whp_dispatch.WHvMapGpaRange(whpx->partition,
1249 host_va,
1250 start_pa,
1251 size,
1252 (WHvMapGpaRangeFlagRead |
1253 WHvMapGpaRangeFlagExecute |
1254 (rom ? 0 : WHvMapGpaRangeFlagWrite)));
1255 } else {
1256 hr = whp_dispatch.WHvUnmapGpaRange(whpx->partition,
1257 start_pa,
1258 size);
1261 if (FAILED(hr)) {
1262 error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
1263 " Host:%p, hr=%08lx",
1264 (add ? "MAP" : "UNMAP"), name,
1265 (void *)(uintptr_t)start_pa, (void *)size, host_va, hr);
1269 static void whpx_process_section(MemoryRegionSection *section, int add)
1271 MemoryRegion *mr = section->mr;
1272 hwaddr start_pa = section->offset_within_address_space;
1273 ram_addr_t size = int128_get64(section->size);
1274 unsigned int delta;
1275 uint64_t host_va;
1277 if (!memory_region_is_ram(mr)) {
1278 return;
1281 delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
1282 delta &= ~qemu_real_host_page_mask;
1283 if (delta > size) {
1284 return;
1286 start_pa += delta;
1287 size -= delta;
1288 size &= qemu_real_host_page_mask;
1289 if (!size || (start_pa & ~qemu_real_host_page_mask)) {
1290 return;
1293 host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
1294 + section->offset_within_region + delta;
1296 whpx_update_mapping(start_pa, size, (void *)(uintptr_t)host_va, add,
1297 memory_region_is_rom(mr), mr->name);
1300 static void whpx_region_add(MemoryListener *listener,
1301 MemoryRegionSection *section)
1303 memory_region_ref(section->mr);
1304 whpx_process_section(section, 1);
1307 static void whpx_region_del(MemoryListener *listener,
1308 MemoryRegionSection *section)
1310 whpx_process_section(section, 0);
1311 memory_region_unref(section->mr);
1314 static void whpx_transaction_begin(MemoryListener *listener)
1318 static void whpx_transaction_commit(MemoryListener *listener)
1322 static void whpx_log_sync(MemoryListener *listener,
1323 MemoryRegionSection *section)
1325 MemoryRegion *mr = section->mr;
1327 if (!memory_region_is_ram(mr)) {
1328 return;
1331 memory_region_set_dirty(mr, 0, int128_get64(section->size));
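/*
 * Note: there is no fine-grained dirty-page tracking here, so log_sync above
 * conservatively marks the whole section dirty; this is one of the reasons a
 * migration blocker is installed in whpx_init_vcpu().
 */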
1334 static MemoryListener whpx_memory_listener = {
1335 .begin = whpx_transaction_begin,
1336 .commit = whpx_transaction_commit,
1337 .region_add = whpx_region_add,
1338 .region_del = whpx_region_del,
1339 .log_sync = whpx_log_sync,
1340 .priority = 10,
1341 };
1343 static void whpx_memory_init(void)
1345 memory_listener_register(&whpx_memory_listener, &address_space_memory);
1348 static void whpx_handle_interrupt(CPUState *cpu, int mask)
1350 cpu->interrupt_request |= mask;
1352 if (!qemu_cpu_is_self(cpu)) {
1353 qemu_cpu_kick(cpu);
1357 /*
1358 * Partition support
1359 */
1361 static int whpx_accel_init(MachineState *ms)
1363 struct whpx_state *whpx;
1364 int ret;
1365 HRESULT hr;
1366 WHV_CAPABILITY whpx_cap;
1367 UINT32 whpx_cap_size;
1368 WHV_PARTITION_PROPERTY prop;
1370 whpx = &whpx_global;
1372 if (!init_whp_dispatch()) {
1373 ret = -ENOSYS;
1374 goto error;
1377 memset(whpx, 0, sizeof(struct whpx_state));
1378 whpx->mem_quota = ms->ram_size;
1380 hr = whp_dispatch.WHvGetCapability(
1381 WHvCapabilityCodeHypervisorPresent, &whpx_cap,
1382 sizeof(whpx_cap), &whpx_cap_size);
1383 if (FAILED(hr) || !whpx_cap.HypervisorPresent) {
1384 error_report("WHPX: No accelerator found, hr=%08lx", hr);
1385 ret = -ENOSPC;
1386 goto error;
1389 hr = whp_dispatch.WHvCreatePartition(&whpx->partition);
1390 if (FAILED(hr)) {
1391 error_report("WHPX: Failed to create partition, hr=%08lx", hr);
1392 ret = -EINVAL;
1393 goto error;
1396 memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
1397 prop.ProcessorCount = ms->smp.cpus;
1398 hr = whp_dispatch.WHvSetPartitionProperty(
1399 whpx->partition,
1400 WHvPartitionPropertyCodeProcessorCount,
1401 &prop,
1402 sizeof(WHV_PARTITION_PROPERTY));
1404 if (FAILED(hr)) {
1405 error_report("WHPX: Failed to set partition processor count to %d,"
1406 " hr=%08lx", ms->smp.cpus, hr);
1407 ret = -EINVAL;
1408 goto error;
1411 memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
1412 prop.ExtendedVmExits.X64MsrExit = 1;
1413 prop.ExtendedVmExits.X64CpuidExit = 1;
1414 hr = whp_dispatch.WHvSetPartitionProperty(
1415 whpx->partition,
1416 WHvPartitionPropertyCodeExtendedVmExits,
1417 &prop,
1418 sizeof(WHV_PARTITION_PROPERTY));
1420 if (FAILED(hr)) {
1421 error_report("WHPX: Failed to enable partition extended X64MsrExit and"
1422 " X64CpuidExit hr=%08lx", hr);
1423 ret = -EINVAL;
1424 goto error;
1427 UINT32 cpuidExitList[] = {1, 0x80000001};
1428 hr = whp_dispatch.WHvSetPartitionProperty(
1429 whpx->partition,
1430 WHvPartitionPropertyCodeCpuidExitList,
1431 cpuidExitList,
1432 RTL_NUMBER_OF(cpuidExitList) * sizeof(UINT32));
1434 if (FAILED(hr)) {
1435 error_report("WHPX: Failed to set partition CpuidExitList hr=%08lx",
1436 hr);
1437 ret = -EINVAL;
1438 goto error;
1441 hr = whp_dispatch.WHvSetupPartition(whpx->partition);
1442 if (FAILED(hr)) {
1443 error_report("WHPX: Failed to setup partition, hr=%08lx", hr);
1444 ret = -EINVAL;
1445 goto error;
1448 whpx_memory_init();
1450 cpu_interrupt_handler = whpx_handle_interrupt;
1452 printf("Windows Hypervisor Platform accelerator is operational\n");
1453 return 0;
1455 error:
1457 if (NULL != whpx->partition) {
1458 whp_dispatch.WHvDeletePartition(whpx->partition);
1459 whpx->partition = NULL;
1463 return ret;
1466 int whpx_enabled(void)
1468 return whpx_allowed;
1471 static void whpx_accel_class_init(ObjectClass *oc, void *data)
1473 AccelClass *ac = ACCEL_CLASS(oc);
1474 ac->name = "WHPX";
1475 ac->init_machine = whpx_accel_init;
1476 ac->allowed = &whpx_allowed;
1479 static const TypeInfo whpx_accel_type = {
1480 .name = ACCEL_CLASS_NAME("whpx"),
1481 .parent = TYPE_ACCEL,
1482 .class_init = whpx_accel_class_init,
1483 };
1485 static void whpx_type_init(void)
1487 type_register_static(&whpx_accel_type);
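/*
 * init_whp_dispatch() resolves the WinHvPlatform / WinHvEmulation entry
 * points at runtime with LoadLibrary()/GetProcAddress() and stores them in
 * whp_dispatch, so QEMU does not take a load-time dependency on the WHPX
 * DLLs and can still start on Windows builds that lack them.
 */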
1490 bool init_whp_dispatch(void)
1492 const char *lib_name;
1493 HMODULE hLib;
1495 if (whp_dispatch_initialized) {
1496 return true;
1499 #define WHP_LOAD_FIELD(return_type, function_name, signature) \
1500 whp_dispatch.function_name = \
1501 (function_name ## _t)GetProcAddress(hLib, #function_name); \
1502 if (!whp_dispatch.function_name) { \
1503 error_report("Could not load function %s from library %s.", \
1504 #function_name, lib_name); \
1505 goto error; \
1508 lib_name = "WinHvPlatform.dll";
1509 hWinHvPlatform = LoadLibrary(lib_name);
1510 if (!hWinHvPlatform) {
1511 error_report("Could not load library %s.", lib_name);
1512 goto error;
1514 hLib = hWinHvPlatform;
1515 LIST_WINHVPLATFORM_FUNCTIONS(WHP_LOAD_FIELD)
1517 lib_name = "WinHvEmulation.dll";
1518 hWinHvEmulation = LoadLibrary(lib_name);
1519 if (!hWinHvEmulation) {
1520 error_report("Could not load library %s.", lib_name);
1521 goto error;
1523 hLib = hWinHvEmulation;
1524 LIST_WINHVEMULATION_FUNCTIONS(WHP_LOAD_FIELD)
1526 whp_dispatch_initialized = true;
1527 return true;
1529 error:
1531 if (hWinHvPlatform) {
1532 FreeLibrary(hWinHvPlatform);
1534 if (hWinHvEmulation) {
1535 FreeLibrary(hWinHvEmulation);
1537 return false;
1540 type_init(whpx_type_init);