/* target/i386/whpx-all.c */
/*
 * QEMU Windows Hypervisor Platform accelerator (WHPX)
 *
 * Copyright Microsoft Corp. 2017
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qemu-common.h"
#include "strings.h"
#include "sysemu/accel.h"
#include "sysemu/whpx.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
#include "hw/boards.h"
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "qapi/error.h"
#include "migration/blocker.h"
#include "whp-dispatch.h"

#include <WinHvPlatform.h>
#include <WinHvEmulation.h>
struct whpx_state {
    uint64_t mem_quota;
    WHV_PARTITION_HANDLE partition;
};
static const WHV_REGISTER_NAME whpx_register_names[] = {

    /* X64 General purpose registers */
    WHvX64RegisterRax,
    WHvX64RegisterRcx,
    WHvX64RegisterRdx,
    WHvX64RegisterRbx,
    WHvX64RegisterRsp,
    WHvX64RegisterRbp,
    WHvX64RegisterRsi,
    WHvX64RegisterRdi,
    WHvX64RegisterR8,
    WHvX64RegisterR9,
    WHvX64RegisterR10,
    WHvX64RegisterR11,
    WHvX64RegisterR12,
    WHvX64RegisterR13,
    WHvX64RegisterR14,
    WHvX64RegisterR15,
    WHvX64RegisterRip,
    WHvX64RegisterRflags,

    /* X64 Segment registers */
    WHvX64RegisterEs,
    WHvX64RegisterCs,
    WHvX64RegisterSs,
    WHvX64RegisterDs,
    WHvX64RegisterFs,
    WHvX64RegisterGs,
    WHvX64RegisterLdtr,
    WHvX64RegisterTr,

    /* X64 Table registers */
    WHvX64RegisterIdtr,
    WHvX64RegisterGdtr,

    /* X64 Control Registers */
    WHvX64RegisterCr0,
    WHvX64RegisterCr2,
    WHvX64RegisterCr3,
    WHvX64RegisterCr4,
    WHvX64RegisterCr8,

    /* X64 Debug Registers */
    /*
     * WHvX64RegisterDr0,
     * WHvX64RegisterDr1,
     * WHvX64RegisterDr2,
     * WHvX64RegisterDr3,
     * WHvX64RegisterDr6,
     * WHvX64RegisterDr7,
     */

    /* X64 Floating Point and Vector Registers */
    WHvX64RegisterXmm0,
    WHvX64RegisterXmm1,
    WHvX64RegisterXmm2,
    WHvX64RegisterXmm3,
    WHvX64RegisterXmm4,
    WHvX64RegisterXmm5,
    WHvX64RegisterXmm6,
    WHvX64RegisterXmm7,
    WHvX64RegisterXmm8,
    WHvX64RegisterXmm9,
    WHvX64RegisterXmm10,
    WHvX64RegisterXmm11,
    WHvX64RegisterXmm12,
    WHvX64RegisterXmm13,
    WHvX64RegisterXmm14,
    WHvX64RegisterXmm15,
    WHvX64RegisterFpMmx0,
    WHvX64RegisterFpMmx1,
    WHvX64RegisterFpMmx2,
    WHvX64RegisterFpMmx3,
    WHvX64RegisterFpMmx4,
    WHvX64RegisterFpMmx5,
    WHvX64RegisterFpMmx6,
    WHvX64RegisterFpMmx7,
    WHvX64RegisterFpControlStatus,
    WHvX64RegisterXmmControlStatus,

    /* X64 MSRs */
    WHvX64RegisterTsc,
    WHvX64RegisterEfer,
#ifdef TARGET_X86_64
    WHvX64RegisterKernelGsBase,
#endif
    WHvX64RegisterApicBase,
    /* WHvX64RegisterPat, */
    WHvX64RegisterSysenterCs,
    WHvX64RegisterSysenterEip,
    WHvX64RegisterSysenterEsp,
    WHvX64RegisterStar,
#ifdef TARGET_X86_64
    WHvX64RegisterLstar,
    WHvX64RegisterCstar,
    WHvX64RegisterSfmask,
#endif

    /* Interrupt / Event Registers */
    /*
     * WHvRegisterPendingInterruption,
     * WHvRegisterInterruptState,
     * WHvRegisterPendingEvent0,
     * WHvRegisterPendingEvent1
     * WHvX64RegisterDeliverabilityNotifications,
     */
};

struct whpx_register_set {
    WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)];
};
struct whpx_vcpu {
    WHV_EMULATOR_HANDLE emulator;
    bool window_registered;
    bool interruptable;
    uint64_t tpr;
    uint64_t apic_base;
    bool interruption_pending;

    /* Must be the last field as it may have a tail */
    WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
};
static bool whpx_allowed;
static bool whp_dispatch_initialized;
static HMODULE hWinHvPlatform, hWinHvEmulation;

struct whpx_state whpx_global;
struct WHPDispatch whp_dispatch;

/*
 * VP support
 */
static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
{
    return (struct whpx_vcpu *)cpu->hax_vcpu;
}
static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
                                             int r86)
{
    WHV_X64_SEGMENT_REGISTER hs;
    unsigned flags = qs->flags;

    hs.Base = qs->base;
    hs.Limit = qs->limit;
    hs.Selector = qs->selector;

    if (v86) {
        hs.Attributes = 0;
        hs.SegmentType = 3;
        hs.Present = 1;
        hs.DescriptorPrivilegeLevel = 3;
        hs.NonSystemSegment = 1;

    } else {
        hs.Attributes = (flags >> DESC_TYPE_SHIFT);

        if (r86) {
            /* hs.Base &= 0xfffff; */
        }
    }

    return hs;
}
static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
{
    SegmentCache qs;

    qs.base = hs->Base;
    qs.limit = hs->Limit;
    qs.selector = hs->Selector;

    qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;

    return qs;
}
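
/*
 * Push QEMU's CPU state into the hypervisor: values are filled into vcxt in
 * the exact order of whpx_register_names and committed with a single
 * WHvSetVirtualProcessorRegisters() call.
 */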
static void whpx_set_registers(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_register_set vcxt;
    HRESULT hr;
    int idx;
    int idx_next;
    int i;
    int v86, r86;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    memset(&vcxt, 0, sizeof(struct whpx_register_set));

    v86 = (env->eflags & VM_MASK);
    r86 = !(env->cr[0] & CR0_PE_MASK);

    vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);

    idx = 0;

    /* Indexes for first 16 registers match between HV and QEMU definitions */
    idx_next = 16;
    for (idx = 0; idx < CPU_NB_REGS; idx += 1) {
        vcxt.values[idx].Reg64 = (uint64_t)env->regs[idx];
    }
    idx = idx_next;

    /* Same goes for RIP and RFLAGS */
    assert(whpx_register_names[idx] == WHvX64RegisterRip);
    vcxt.values[idx++].Reg64 = env->eip;

    assert(whpx_register_names[idx] == WHvX64RegisterRflags);
    vcxt.values[idx++].Reg64 = env->eflags;

    /* Translate 6+4 segment registers. HV and QEMU order matches */
    assert(idx == WHvX64RegisterEs);
    for (i = 0; i < 6; i += 1, idx += 1) {
        vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
    }

    assert(idx == WHvX64RegisterLdtr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);

    assert(idx == WHvX64RegisterTr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);

    assert(idx == WHvX64RegisterIdtr);
    vcxt.values[idx].Table.Base = env->idt.base;
    vcxt.values[idx].Table.Limit = env->idt.limit;
    idx += 1;

    assert(idx == WHvX64RegisterGdtr);
    vcxt.values[idx].Table.Base = env->gdt.base;
    vcxt.values[idx].Table.Limit = env->gdt.limit;
    idx += 1;

    /* CR0, 2, 3, 4, 8 */
    assert(whpx_register_names[idx] == WHvX64RegisterCr0);
    vcxt.values[idx++].Reg64 = env->cr[0];
    assert(whpx_register_names[idx] == WHvX64RegisterCr2);
    vcxt.values[idx++].Reg64 = env->cr[2];
    assert(whpx_register_names[idx] == WHvX64RegisterCr3);
    vcxt.values[idx++].Reg64 = env->cr[3];
    assert(whpx_register_names[idx] == WHvX64RegisterCr4);
    vcxt.values[idx++].Reg64 = env->cr[4];
    assert(whpx_register_names[idx] == WHvX64RegisterCr8);
    vcxt.values[idx++].Reg64 = vcpu->tpr;

    /* 8 Debug Registers - Skipped */

    /* 16 XMM registers */
    assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
    idx_next = idx + 16;
    for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
        vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
        vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
    }
    idx = idx_next;

    /* 8 FP registers */
    assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
    for (i = 0; i < 8; i += 1, idx += 1) {
        vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
        /* vcxt.values[idx].Fp.AsUINT128.High64 =
               env->fpregs[i].mmx.MMX_Q(1);
        */
    }

    /* FP control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
    vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
    vcxt.values[idx].FpControlStatus.FpStatus =
        (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    vcxt.values[idx].FpControlStatus.FpTag = 0;
    for (i = 0; i < 8; ++i) {
        vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
    }
    vcxt.values[idx].FpControlStatus.Reserved = 0;
    vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
    vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
    idx += 1;

    /* XMM control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
    vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
    vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
    vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;
    idx += 1;

    /* MSRs */
    assert(whpx_register_names[idx] == WHvX64RegisterTsc);
    vcxt.values[idx++].Reg64 = env->tsc;
    assert(whpx_register_names[idx] == WHvX64RegisterEfer);
    vcxt.values[idx++].Reg64 = env->efer;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
    vcxt.values[idx++].Reg64 = env->kernelgsbase;
#endif

    assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
    vcxt.values[idx++].Reg64 = vcpu->apic_base;

    /* WHvX64RegisterPat - Skipped */

    assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
    vcxt.values[idx++].Reg64 = env->sysenter_cs;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
    vcxt.values[idx++].Reg64 = env->sysenter_eip;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
    vcxt.values[idx++].Reg64 = env->sysenter_esp;
    assert(whpx_register_names[idx] == WHvX64RegisterStar);
    vcxt.values[idx++].Reg64 = env->star;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterLstar);
    vcxt.values[idx++].Reg64 = env->lstar;
    assert(whpx_register_names[idx] == WHvX64RegisterCstar);
    vcxt.values[idx++].Reg64 = env->cstar;
    assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
    vcxt.values[idx++].Reg64 = env->fmask;
#endif

    /* Interrupt / Event Registers - Skipped */

    assert(idx == RTL_NUMBER_OF(whpx_register_names));

    hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index,
        whpx_register_names,
        RTL_NUMBER_OF(whpx_register_names),
        &vcxt.values[0]);

    if (FAILED(hr)) {
        error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
                     hr);
    }

    return;
}
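
/*
 * Pull the current VP state out of the hypervisor and mirror it back into
 * QEMU's CPUX86State, propagating APIC TPR and base changes when detected.
 */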
static void whpx_get_registers(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_register_set vcxt;
    uint64_t tpr, apic_base;
    HRESULT hr;
    int idx;
    int idx_next;
    int i;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index,
        whpx_register_names,
        RTL_NUMBER_OF(whpx_register_names),
        &vcxt.values[0]);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
                     hr);
    }

    idx = 0;

    /* Indexes for first 16 registers match between HV and QEMU definitions */
    idx_next = 16;
    for (idx = 0; idx < CPU_NB_REGS; idx += 1) {
        env->regs[idx] = vcxt.values[idx].Reg64;
    }
    idx = idx_next;

    /* Same goes for RIP and RFLAGS */
    assert(whpx_register_names[idx] == WHvX64RegisterRip);
    env->eip = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterRflags);
    env->eflags = vcxt.values[idx++].Reg64;

    /* Translate 6+4 segment registers. HV and QEMU order matches */
    assert(idx == WHvX64RegisterEs);
    for (i = 0; i < 6; i += 1, idx += 1) {
        env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
    }

    assert(idx == WHvX64RegisterLdtr);
    env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
    assert(idx == WHvX64RegisterTr);
    env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
    assert(idx == WHvX64RegisterIdtr);
    env->idt.base = vcxt.values[idx].Table.Base;
    env->idt.limit = vcxt.values[idx].Table.Limit;
    idx += 1;
    assert(idx == WHvX64RegisterGdtr);
    env->gdt.base = vcxt.values[idx].Table.Base;
    env->gdt.limit = vcxt.values[idx].Table.Limit;
    idx += 1;

    /* CR0, 2, 3, 4, 8 */
    assert(whpx_register_names[idx] == WHvX64RegisterCr0);
    env->cr[0] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr2);
    env->cr[2] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr3);
    env->cr[3] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr4);
    env->cr[4] = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCr8);
    tpr = vcxt.values[idx++].Reg64;
    if (tpr != vcpu->tpr) {
        vcpu->tpr = tpr;
        cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
    }

    /* 8 Debug Registers - Skipped */

    /* 16 XMM registers */
    assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
    idx_next = idx + 16;
    for (i = 0; i < sizeof(env->xmm_regs) / sizeof(ZMMReg); i += 1, idx += 1) {
        env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
        env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
    }
    idx = idx_next;

    /* 8 FP registers */
    assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
    for (i = 0; i < 8; i += 1, idx += 1) {
        env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
        /* env->fpregs[i].mmx.MMX_Q(1) =
               vcxt.values[idx].Fp.AsUINT128.High64;
        */
    }

    /* FP control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
    env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
    env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
    env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
    for (i = 0; i < 8; ++i) {
        env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
    }
    env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
    env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
    idx += 1;

    /* XMM control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
    env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
    idx += 1;

    /* MSRs */
    assert(whpx_register_names[idx] == WHvX64RegisterTsc);
    env->tsc = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterEfer);
    env->efer = vcxt.values[idx++].Reg64;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
    env->kernelgsbase = vcxt.values[idx++].Reg64;
#endif

    assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
    apic_base = vcxt.values[idx++].Reg64;
    if (apic_base != vcpu->apic_base) {
        vcpu->apic_base = apic_base;
        cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
    }

    /* WHvX64RegisterPat - Skipped */

    assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
    env->sysenter_cs = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
    env->sysenter_eip = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
    env->sysenter_esp = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterStar);
    env->star = vcxt.values[idx++].Reg64;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterLstar);
    env->lstar = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterCstar);
    env->cstar = vcxt.values[idx++].Reg64;
    assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
    env->fmask = vcxt.values[idx++].Reg64;
#endif

    /* Interrupt / Event Registers - Skipped */

    assert(idx == RTL_NUMBER_OF(whpx_register_names));

    return;
}
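
/*
 * Callbacks used by the WHv instruction emulator to touch guest state:
 * port I/O and MMIO accesses are forwarded to QEMU's address spaces, while
 * register reads/writes and GVA-to-GPA translation go back to the hypervisor.
 */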
static HRESULT CALLBACK whpx_emu_ioport_callback(
    void *ctx,
    WHV_EMULATOR_IO_ACCESS_INFO *IoAccess)
{
    MemTxAttrs attrs = { 0 };
    address_space_rw(&address_space_io, IoAccess->Port, attrs,
                     (uint8_t *)&IoAccess->Data, IoAccess->AccessSize,
                     IoAccess->Direction);
    return S_OK;
}

static HRESULT CALLBACK whpx_emu_mmio_callback(
    void *ctx,
    WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
{
    cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
                           ma->Direction);
    return S_OK;
}
static HRESULT CALLBACK whpx_emu_getreg_callback(
    void *ctx,
    const WHV_REGISTER_NAME *RegisterNames,
    UINT32 RegisterCount,
    WHV_REGISTER_VALUE *RegisterValues)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    CPUState *cpu = (CPUState *)ctx;

    hr = whp_dispatch.WHvGetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index,
        RegisterNames, RegisterCount,
        RegisterValues);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to get virtual processor registers,"
                     " hr=%08lx", hr);
    }

    return hr;
}

static HRESULT CALLBACK whpx_emu_setreg_callback(
    void *ctx,
    const WHV_REGISTER_NAME *RegisterNames,
    UINT32 RegisterCount,
    const WHV_REGISTER_VALUE *RegisterValues)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    CPUState *cpu = (CPUState *)ctx;

    hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
        whpx->partition, cpu->cpu_index,
        RegisterNames, RegisterCount,
        RegisterValues);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to set virtual processor registers,"
                     " hr=%08lx", hr);
    }

    /*
     * The emulator just successfully wrote the register state. We clear the
     * dirty state so we avoid the double write on resume of the VP.
     */
    cpu->vcpu_dirty = false;

    return hr;
}
static HRESULT CALLBACK whpx_emu_translate_callback(
    void *ctx,
    WHV_GUEST_VIRTUAL_ADDRESS Gva,
    WHV_TRANSLATE_GVA_FLAGS TranslateFlags,
    WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult,
    WHV_GUEST_PHYSICAL_ADDRESS *Gpa)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    CPUState *cpu = (CPUState *)ctx;
    WHV_TRANSLATE_GVA_RESULT res;

    hr = whp_dispatch.WHvTranslateGva(whpx->partition, cpu->cpu_index,
                                      Gva, TranslateFlags, &res, Gpa);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to translate GVA, hr=%08lx", hr);
    } else {
        *TranslationResult = res.ResultCode;
    }

    return hr;
}

static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = {
    .Size = sizeof(WHV_EMULATOR_CALLBACKS),
    .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
    .WHvEmulatorMemoryCallback = whpx_emu_mmio_callback,
    .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
    .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
    .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
};
static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx)
{
    HRESULT hr;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    WHV_EMULATOR_STATUS emu_status;

    hr = whp_dispatch.WHvEmulatorTryMmioEmulation(
        vcpu->emulator, cpu,
        &vcpu->exit_ctx.VpContext, ctx,
        &emu_status);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr);
        return -1;
    }

    if (!emu_status.EmulationSuccessful) {
        error_report("WHPX: Failed to emulate MMIO access with"
                     " EmulatorReturnStatus: %u", emu_status.AsUINT32);
        return -1;
    }

    return 0;
}

static int whpx_handle_portio(CPUState *cpu,
                              WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx)
{
    HRESULT hr;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    WHV_EMULATOR_STATUS emu_status;

    hr = whp_dispatch.WHvEmulatorTryIoEmulation(
        vcpu->emulator, cpu,
        &vcpu->exit_ctx.VpContext, ctx,
        &emu_status);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr);
        return -1;
    }

    if (!emu_status.EmulationSuccessful) {
        error_report("WHPX: Failed to emulate PortIO access with"
                     " EmulatorReturnStatus: %u", emu_status.AsUINT32);
        return -1;
    }

    return 0;
}
static int whpx_handle_halt(CPUState *cpu)
{
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    int ret = 0;

    qemu_mutex_lock_iothread();
    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->exception_index = EXCP_HLT;
        cpu->halted = true;
        ret = 1;
    }
    qemu_mutex_unlock_iothread();

    return ret;
}
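
/*
 * Prepare the VP for the next run: queue a pending NMI or external interrupt
 * if one can be delivered now, mirror the APIC TPR into CR8, and request an
 * interrupt-window exit when an interrupt is pending but cannot be injected
 * yet.
 */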
static void whpx_vcpu_pre_run(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    int irq;
    uint8_t tpr;
    WHV_X64_PENDING_INTERRUPTION_REGISTER new_int;
    UINT32 reg_count = 0;
    WHV_REGISTER_VALUE reg_values[3];
    WHV_REGISTER_NAME reg_names[3];

    memset(&new_int, 0, sizeof(new_int));
    memset(reg_values, 0, sizeof(reg_values));

    qemu_mutex_lock_iothread();

    /* Inject NMI */
    if (!vcpu->interruption_pending &&
        cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            vcpu->interruptable = false;
            new_int.InterruptionType = WHvX64PendingNmi;
            new_int.InterruptionPending = 1;
            new_int.InterruptionVector = 2;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
        }
    }

    /*
     * Force the VCPU out of its inner loop to process any INIT requests or
     * commit pending TPR access.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    /* Get pending hard interruption or replay one that was overwritten */
    if (!vcpu->interruption_pending &&
        vcpu->interruptable && (env->eflags & IF_MASK)) {
        assert(!new_int.InterruptionPending);
        if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                new_int.InterruptionType = WHvX64PendingInterrupt;
                new_int.InterruptionPending = 1;
                new_int.InterruptionVector = irq;
            }
        }
    }

    /* Setup interrupt state if new one was prepared */
    if (new_int.InterruptionPending) {
        reg_values[reg_count].PendingInterruption = new_int;
        reg_names[reg_count] = WHvRegisterPendingInterruption;
        reg_count += 1;
    }

    /* Sync the TPR to the CR8 if it was modified during the intercept */
    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    if (tpr != vcpu->tpr) {
        vcpu->tpr = tpr;
        reg_values[reg_count].Reg64 = tpr;
        cpu->exit_request = 1;
        reg_names[reg_count] = WHvX64RegisterCr8;
        reg_count += 1;
    }

    /* Update the state of the interrupt delivery notification */
    if (!vcpu->window_registered &&
        cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        reg_values[reg_count].DeliverabilityNotifications.InterruptNotification
            = 1;
        vcpu->window_registered = 1;
        reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;
        reg_count += 1;
    }

    qemu_mutex_unlock_iothread();

    if (reg_count) {
        hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
            whpx->partition, cpu->cpu_index,
            reg_names, reg_count, reg_values);
        if (FAILED(hr)) {
            error_report("WHPX: Failed to set interrupt state registers,"
                         " hr=%08lx", hr);
        }
    }

    return;
}
static void whpx_vcpu_post_run(CPUState *cpu)
{
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);

    env->eflags = vcpu->exit_ctx.VpContext.Rflags;

    uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8;
    if (vcpu->tpr != tpr) {
        vcpu->tpr = tpr;
        qemu_mutex_lock_iothread();
        cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
        qemu_mutex_unlock_iothread();
    }

    vcpu->interruption_pending =
        vcpu->exit_ctx.VpContext.ExecutionState.InterruptionPending;

    vcpu->interruptable =
        !vcpu->exit_ctx.VpContext.ExecutionState.InterruptShadow;

    return;
}
static void whpx_vcpu_process_async_events(CPUState *cpu)
{
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {

        do_cpu_init(x86_cpu);
        cpu->vcpu_dirty = true;
        vcpu->interruptable = true;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    }

    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->halted = false;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        }
        do_cpu_sipi(x86_cpu);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        }
        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return;
}
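
/*
 * Main run loop for a WHPX vCPU: flush dirty registers, prepare pending
 * interrupts, call WHvRunVirtualProcessor() and dispatch on the exit reason
 * (MMIO, port I/O, interrupt window, HLT, cancel, MSR and CPUID intercepts)
 * until a return to the main loop or an error is required.
 */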
static int whpx_vcpu_run(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    int ret;

    whpx_vcpu_process_async_events(cpu);
    if (cpu->halted) {
        cpu->exception_index = EXCP_HLT;
        atomic_set(&cpu->exit_request, false);
        return 0;
    }

    qemu_mutex_unlock_iothread();
    cpu_exec_start(cpu);

    do {
        if (cpu->vcpu_dirty) {
            whpx_set_registers(cpu);
            cpu->vcpu_dirty = false;
        }

        whpx_vcpu_pre_run(cpu);

        if (atomic_read(&cpu->exit_request)) {
            whpx_vcpu_kick(cpu);
        }

        hr = whp_dispatch.WHvRunVirtualProcessor(
            whpx->partition, cpu->cpu_index,
            &vcpu->exit_ctx, sizeof(vcpu->exit_ctx));

        if (FAILED(hr)) {
            error_report("WHPX: Failed to exec a virtual processor,"
                         " hr=%08lx", hr);
            ret = -1;
            break;
        }

        whpx_vcpu_post_run(cpu);

        switch (vcpu->exit_ctx.ExitReason) {
        case WHvRunVpExitReasonMemoryAccess:
            ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);
            break;

        case WHvRunVpExitReasonX64IoPortAccess:
            ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);
            break;

        case WHvRunVpExitReasonX64InterruptWindow:
            vcpu->window_registered = 0;
            ret = 0;
            break;

        case WHvRunVpExitReasonX64Halt:
            ret = whpx_handle_halt(cpu);
            break;

        case WHvRunVpExitReasonCanceled:
            cpu->exception_index = EXCP_INTERRUPT;
            ret = 1;
            break;

        case WHvRunVpExitReasonX64MsrAccess: {
            WHV_REGISTER_VALUE reg_values[3] = {0};
            WHV_REGISTER_NAME reg_names[3];
            UINT32 reg_count;

            reg_names[0] = WHvX64RegisterRip;
            reg_names[1] = WHvX64RegisterRax;
            reg_names[2] = WHvX64RegisterRdx;

            reg_values[0].Reg64 =
                vcpu->exit_ctx.VpContext.Rip +
                vcpu->exit_ctx.VpContext.InstructionLength;

            /*
             * For all unsupported MSR access we:
             *     ignore writes
             *     return 0 on read.
             */
            reg_count = vcpu->exit_ctx.MsrAccess.AccessInfo.IsWrite ?
                        1 : 3;

            hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
                whpx->partition,
                cpu->cpu_index,
                reg_names, reg_count,
                reg_values);

            if (FAILED(hr)) {
                error_report("WHPX: Failed to set MsrAccess state "
                             " registers, hr=%08lx", hr);
            }
            ret = 0;
            break;
        }
        case WHvRunVpExitReasonX64Cpuid: {
            WHV_REGISTER_VALUE reg_values[5];
            WHV_REGISTER_NAME reg_names[5];
            UINT32 reg_count = 5;
            UINT64 rip, rax, rcx, rdx, rbx;

            memset(reg_values, 0, sizeof(reg_values));

            rip = vcpu->exit_ctx.VpContext.Rip +
                  vcpu->exit_ctx.VpContext.InstructionLength;
            switch (vcpu->exit_ctx.CpuidAccess.Rax) {
            case 1:
                rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax;
                /* Advertise that we are running on a hypervisor */
                rcx =
                    vcpu->exit_ctx.CpuidAccess.DefaultResultRcx |
                    CPUID_EXT_HYPERVISOR;

                rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx;
                rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx;
                break;
            case 0x80000001:
                rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax;
                /* Remove any support of OSVW */
                rcx =
                    vcpu->exit_ctx.CpuidAccess.DefaultResultRcx &
                    ~CPUID_EXT3_OSVW;

                rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx;
                rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx;
                break;
            default:
                rax = vcpu->exit_ctx.CpuidAccess.DefaultResultRax;
                rcx = vcpu->exit_ctx.CpuidAccess.DefaultResultRcx;
                rdx = vcpu->exit_ctx.CpuidAccess.DefaultResultRdx;
                rbx = vcpu->exit_ctx.CpuidAccess.DefaultResultRbx;
            }

            reg_names[0] = WHvX64RegisterRip;
            reg_names[1] = WHvX64RegisterRax;
            reg_names[2] = WHvX64RegisterRcx;
            reg_names[3] = WHvX64RegisterRdx;
            reg_names[4] = WHvX64RegisterRbx;

            reg_values[0].Reg64 = rip;
            reg_values[1].Reg64 = rax;
            reg_values[2].Reg64 = rcx;
            reg_values[3].Reg64 = rdx;
            reg_values[4].Reg64 = rbx;

            hr = whp_dispatch.WHvSetVirtualProcessorRegisters(
                whpx->partition, cpu->cpu_index,
                reg_names,
                reg_count,
                reg_values);

            if (FAILED(hr)) {
                error_report("WHPX: Failed to set CpuidAccess state registers,"
                             " hr=%08lx", hr);
            }
            ret = 0;
            break;
        }
        case WHvRunVpExitReasonNone:
        case WHvRunVpExitReasonUnrecoverableException:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonException:
        default:
            error_report("WHPX: Unexpected VP exit code %d",
                         vcpu->exit_ctx.ExitReason);
            whpx_get_registers(cpu);
            qemu_mutex_lock_iothread();
            qemu_system_guest_panicked(cpu_get_crash_info(cpu));
            qemu_mutex_unlock_iothread();
            break;
        }

    } while (!ret);

    cpu_exec_end(cpu);
    qemu_mutex_lock_iothread();
    current_cpu = cpu;

    atomic_set(&cpu->exit_request, false);

    return ret < 0;
}
static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    whpx_get_registers(cpu);
    cpu->vcpu_dirty = true;
}

static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
                                               run_on_cpu_data arg)
{
    whpx_set_registers(cpu);
    cpu->vcpu_dirty = false;
}

static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    whpx_set_registers(cpu);
    cpu->vcpu_dirty = false;
}

static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
                                               run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

/*
 * CPU support.
 */

void whpx_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

void whpx_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

void whpx_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}

/*
 * Vcpu support.
 */

static Error *whpx_migration_blocker;
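
/*
 * Create the per-vCPU WHPX state: add the migration blocker for features
 * WHPX cannot migrate, instantiate the instruction emulator and create the
 * virtual processor in the partition.
 */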
int whpx_init_vcpu(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu;
    Error *local_error = NULL;

    /* Add migration blockers for all unsupported features of the
     * Windows Hypervisor Platform
     */
    if (whpx_migration_blocker == NULL) {
        error_setg(&whpx_migration_blocker,
                   "State blocked due to non-migratable CPUID feature support, "
                   "dirty memory tracking support, and XSAVE/XRSTOR support");

        (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
        if (local_error) {
            error_report_err(local_error);
            migrate_del_blocker(whpx_migration_blocker);
            error_free(whpx_migration_blocker);
            return -EINVAL;
        }
    }

    vcpu = g_malloc0(sizeof(struct whpx_vcpu));

    if (!vcpu) {
        error_report("WHPX: Failed to allocate VCPU context.");
        return -ENOMEM;
    }

    hr = whp_dispatch.WHvEmulatorCreateEmulator(
        &whpx_emu_callbacks,
        &vcpu->emulator);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to setup instruction completion support,"
                     " hr=%08lx", hr);
        g_free(vcpu);
        return -EINVAL;
    }

    hr = whp_dispatch.WHvCreateVirtualProcessor(
        whpx->partition, cpu->cpu_index, 0);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to create a virtual processor,"
                     " hr=%08lx", hr);
        whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
        g_free(vcpu);
        return -EINVAL;
    }

    vcpu->interruptable = true;

    cpu->vcpu_dirty = true;
    cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;

    return 0;
}
int whpx_vcpu_exec(CPUState *cpu)
{
    int ret;
    int fatal;

    for (;;) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            ret = cpu->exception_index;
            cpu->exception_index = -1;
            break;
        }

        fatal = whpx_vcpu_run(cpu);

        if (fatal) {
            error_report("WHPX: Failed to exec a virtual processor");
            abort();
        }
    }

    return ret;
}

void whpx_destroy_vcpu(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    whp_dispatch.WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
    whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
    g_free(cpu->hax_vcpu);
    return;
}

void whpx_vcpu_kick(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    whp_dispatch.WHvCancelRunVirtualProcessor(
        whpx->partition, cpu->cpu_index, 0);
}
/*
 * Memory support.
 */
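
/*
 * Map or unmap one guest-physical range: RAM is mapped read/write/execute,
 * ROM without write permission; failures are reported but not fatal.
 */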
static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size,
                                void *host_va, int add, int rom,
                                const char *name)
{
    struct whpx_state *whpx = &whpx_global;
    HRESULT hr;

    /*
    if (add) {
        printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
               (void*)start_pa, (void*)size, host_va,
               (rom ? "ROM" : "RAM"), name);
    } else {
        printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n",
               (void*)start_pa, (void*)size, host_va, name);
    }
    */

    if (add) {
        hr = whp_dispatch.WHvMapGpaRange(whpx->partition,
                                         host_va,
                                         start_pa,
                                         size,
                                         (WHvMapGpaRangeFlagRead |
                                          WHvMapGpaRangeFlagExecute |
                                          (rom ? 0 : WHvMapGpaRangeFlagWrite)));
    } else {
        hr = whp_dispatch.WHvUnmapGpaRange(whpx->partition,
                                           start_pa,
                                           size);
    }

    if (FAILED(hr)) {
        error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
                     " Host:%p, hr=%08lx",
                     (add ? "MAP" : "UNMAP"), name,
                     (void *)(uintptr_t)start_pa, (void *)size, host_va, hr);
    }
}
static void whpx_process_section(MemoryRegionSection *section, int add)
{
    MemoryRegion *mr = section->mr;
    hwaddr start_pa = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    unsigned int delta;
    uint64_t host_va;

    if (!memory_region_is_ram(mr)) {
        return;
    }

    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
    delta &= ~qemu_real_host_page_mask;
    if (delta > size) {
        return;
    }
    start_pa += delta;
    size -= delta;
    size &= qemu_real_host_page_mask;
    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
        return;
    }

    host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
            + section->offset_within_region + delta;

    whpx_update_mapping(start_pa, size, (void *)(uintptr_t)host_va, add,
                        memory_region_is_rom(mr), mr->name);
}

static void whpx_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    whpx_process_section(section, 1);
}

static void whpx_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    whpx_process_section(section, 0);
    memory_region_unref(section->mr);
}
static void whpx_transaction_begin(MemoryListener *listener)
{
}

static void whpx_transaction_commit(MemoryListener *listener)
{
}

static void whpx_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (!memory_region_is_ram(mr)) {
        return;
    }

    memory_region_set_dirty(mr, 0, int128_get64(section->size));
}

static MemoryListener whpx_memory_listener = {
    .begin = whpx_transaction_begin,
    .commit = whpx_transaction_commit,
    .region_add = whpx_region_add,
    .region_del = whpx_region_del,
    .log_sync = whpx_log_sync,
    .priority = 10,
};

static void whpx_memory_init(void)
{
    memory_listener_register(&whpx_memory_listener, &address_space_memory);
}

static void whpx_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
/*
 * Partition support
 */
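
/*
 * Accelerator bring-up: verify that the Windows hypervisor is present,
 * create and configure the partition (vCPU count, MSR/CPUID exits, CPUID
 * exit list), set it up and register the memory listener. The accelerator
 * is typically selected with "-accel whpx" on the QEMU command line.
 */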
static int whpx_accel_init(MachineState *ms)
{
    struct whpx_state *whpx;
    int ret;
    HRESULT hr;
    WHV_CAPABILITY whpx_cap;
    UINT32 whpx_cap_size;
    WHV_PARTITION_PROPERTY prop;

    whpx = &whpx_global;

    if (!init_whp_dispatch()) {
        ret = -ENOSYS;
        goto error;
    }

    memset(whpx, 0, sizeof(struct whpx_state));
    whpx->mem_quota = ms->ram_size;

    hr = whp_dispatch.WHvGetCapability(
        WHvCapabilityCodeHypervisorPresent, &whpx_cap,
        sizeof(whpx_cap), &whpx_cap_size);
    if (FAILED(hr) || !whpx_cap.HypervisorPresent) {
        error_report("WHPX: No accelerator found, hr=%08lx", hr);
        ret = -ENOSPC;
        goto error;
    }

    hr = whp_dispatch.WHvCreatePartition(&whpx->partition);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to create partition, hr=%08lx", hr);
        ret = -EINVAL;
        goto error;
    }

    memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
    prop.ProcessorCount = smp_cpus;
    hr = whp_dispatch.WHvSetPartitionProperty(
        whpx->partition,
        WHvPartitionPropertyCodeProcessorCount,
        &prop,
        sizeof(WHV_PARTITION_PROPERTY));

    if (FAILED(hr)) {
        error_report("WHPX: Failed to set partition core count to %d,"
                     " hr=%08lx", smp_cpus, hr);
        ret = -EINVAL;
        goto error;
    }

    memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
    prop.ExtendedVmExits.X64MsrExit = 1;
    prop.ExtendedVmExits.X64CpuidExit = 1;
    hr = whp_dispatch.WHvSetPartitionProperty(
        whpx->partition,
        WHvPartitionPropertyCodeExtendedVmExits,
        &prop,
        sizeof(WHV_PARTITION_PROPERTY));

    if (FAILED(hr)) {
        error_report("WHPX: Failed to enable partition extended X64MsrExit and"
                     " X64CpuidExit hr=%08lx", hr);
        ret = -EINVAL;
        goto error;
    }

    UINT32 cpuidExitList[] = {1, 0x80000001};
    hr = whp_dispatch.WHvSetPartitionProperty(
        whpx->partition,
        WHvPartitionPropertyCodeCpuidExitList,
        cpuidExitList,
        RTL_NUMBER_OF(cpuidExitList) * sizeof(UINT32));

    if (FAILED(hr)) {
        error_report("WHPX: Failed to set partition CpuidExitList hr=%08lx",
                     hr);
        ret = -EINVAL;
        goto error;
    }

    hr = whp_dispatch.WHvSetupPartition(whpx->partition);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to setup partition, hr=%08lx", hr);
        ret = -EINVAL;
        goto error;
    }

    whpx_memory_init();

    cpu_interrupt_handler = whpx_handle_interrupt;

    printf("Windows Hypervisor Platform accelerator is operational\n");
    return 0;

error:

    if (NULL != whpx->partition) {
        whp_dispatch.WHvDeletePartition(whpx->partition);
        whpx->partition = NULL;
    }

    return ret;
}
int whpx_enabled(void)
{
    return whpx_allowed;
}

static void whpx_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "WHPX";
    ac->init_machine = whpx_accel_init;
    ac->allowed = &whpx_allowed;
}

static const TypeInfo whpx_accel_type = {
    .name = ACCEL_CLASS_NAME("whpx"),
    .parent = TYPE_ACCEL,
    .class_init = whpx_accel_class_init,
};

static void whpx_type_init(void)
{
    type_register_static(&whpx_accel_type);
}
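
/*
 * Resolve the WHPX entry points at runtime: WinHvPlatform.dll and
 * WinHvEmulation.dll are loaded with LoadLibrary() and every function used
 * above is looked up via GetProcAddress() into the whp_dispatch table, which
 * avoids a hard link-time dependency on the Windows Hypervisor Platform DLLs.
 */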
bool init_whp_dispatch(void)
{
    const char *lib_name;
    HMODULE hLib;

    if (whp_dispatch_initialized) {
        return true;
    }

#define WHP_LOAD_FIELD(return_type, function_name, signature) \
    whp_dispatch.function_name = \
        (function_name ## _t)GetProcAddress(hLib, #function_name); \
    if (!whp_dispatch.function_name) { \
        error_report("Could not load function %s from library %s.", \
                     #function_name, lib_name); \
        goto error; \
    } \

    lib_name = "WinHvPlatform.dll";
    hWinHvPlatform = LoadLibrary(lib_name);
    if (!hWinHvPlatform) {
        error_report("Could not load library %s.", lib_name);
        goto error;
    }
    hLib = hWinHvPlatform;
    LIST_WINHVPLATFORM_FUNCTIONS(WHP_LOAD_FIELD)

    lib_name = "WinHvEmulation.dll";
    hWinHvEmulation = LoadLibrary(lib_name);
    if (!hWinHvEmulation) {
        error_report("Could not load library %s.", lib_name);
        goto error;
    }
    hLib = hWinHvEmulation;
    LIST_WINHVEMULATION_FUNCTIONS(WHP_LOAD_FIELD)

    whp_dispatch_initialized = true;
    return true;

error:

    if (hWinHvPlatform) {
        FreeLibrary(hWinHvPlatform);
    }
    if (hWinHvEmulation) {
        FreeLibrary(hWinHvEmulation);
    }
    return false;
}

type_init(whpx_type_init);