2 * QEMU Windows Hypervisor Platform accelerator (WHPX)
4 * Copyright Microsoft Corp. 2017
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
11 #include "qemu/osdep.h"
13 #include "exec/address-spaces.h"
14 #include "exec/ioport.h"
15 #include "qemu-common.h"
17 #include "sysemu/accel.h"
18 #include "sysemu/whpx.h"
19 #include "sysemu/sysemu.h"
20 #include "sysemu/cpus.h"
21 #include "qemu/main-loop.h"
22 #include "hw/boards.h"
23 #include "qemu/error-report.h"
24 #include "qemu/queue.h"
25 #include "qapi/error.h"
26 #include "migration/blocker.h"
27 #include "whp-dispatch.h"
29 #include <WinHvPlatform.h>
30 #include <WinHvEmulation.h>
34 WHV_PARTITION_HANDLE partition
;
37 static const WHV_REGISTER_NAME whpx_register_names
[] = {
39 /* X64 General purpose registers */
59 /* X64 Segment registers */
69 /* X64 Table registers */
73 /* X64 Control Registers */
80 /* X64 Debug Registers */
90 /* X64 Floating Point and Vector Registers */
107 WHvX64RegisterFpMmx0
,
108 WHvX64RegisterFpMmx1
,
109 WHvX64RegisterFpMmx2
,
110 WHvX64RegisterFpMmx3
,
111 WHvX64RegisterFpMmx4
,
112 WHvX64RegisterFpMmx5
,
113 WHvX64RegisterFpMmx6
,
114 WHvX64RegisterFpMmx7
,
115 WHvX64RegisterFpControlStatus
,
116 WHvX64RegisterXmmControlStatus
,
122 WHvX64RegisterKernelGsBase
,
124 WHvX64RegisterApicBase
,
125 /* WHvX64RegisterPat, */
126 WHvX64RegisterSysenterCs
,
127 WHvX64RegisterSysenterEip
,
128 WHvX64RegisterSysenterEsp
,
133 WHvX64RegisterSfmask
,
136 /* Interrupt / Event Registers */
138 * WHvRegisterPendingInterruption,
139 * WHvRegisterInterruptState,
140 * WHvRegisterPendingEvent0,
141 * WHvRegisterPendingEvent1
142 * WHvX64RegisterDeliverabilityNotifications,
146 struct whpx_register_set
{
147 WHV_REGISTER_VALUE values
[RTL_NUMBER_OF(whpx_register_names
)];
151 WHV_EMULATOR_HANDLE emulator
;
152 bool window_registered
;
156 bool interruption_pending
;
158 /* Must be the last field as it may have a tail */
159 WHV_RUN_VP_EXIT_CONTEXT exit_ctx
;
/* True when the WHPX accelerator has been selected and initialized. */
static bool whpx_allowed;
/* True once the WinHvPlatform/WinHvEmulation entry points are resolved. */
static bool whp_dispatch_initialized;
/* Lazily loaded DLL handles for the two WHP libraries. */
static HMODULE hWinHvPlatform, hWinHvEmulation;

/* Process-wide WHPX state (QEMU uses a single WHP partition). */
struct whpx_state whpx_global;
/* Dynamically resolved WHP function table. */
struct WHPDispatch whp_dispatch;
174 static struct whpx_vcpu
*get_whpx_vcpu(CPUState
*cpu
)
176 return (struct whpx_vcpu
*)cpu
->hax_vcpu
;
179 static WHV_X64_SEGMENT_REGISTER
whpx_seg_q2h(const SegmentCache
*qs
, int v86
,
182 WHV_X64_SEGMENT_REGISTER hs
;
183 unsigned flags
= qs
->flags
;
186 hs
.Limit
= qs
->limit
;
187 hs
.Selector
= qs
->selector
;
193 hs
.DescriptorPrivilegeLevel
= 3;
194 hs
.NonSystemSegment
= 1;
197 hs
.Attributes
= (flags
>> DESC_TYPE_SHIFT
);
200 /* hs.Base &= 0xfffff; */
207 static SegmentCache
whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER
*hs
)
212 qs
.limit
= hs
->Limit
;
213 qs
.selector
= hs
->Selector
;
215 qs
.flags
= ((uint32_t)hs
->Attributes
) << DESC_TYPE_SHIFT
;
220 static void whpx_set_registers(CPUState
*cpu
)
222 struct whpx_state
*whpx
= &whpx_global
;
223 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
224 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
225 X86CPU
*x86_cpu
= X86_CPU(cpu
);
226 struct whpx_register_set vcxt
;
233 assert(cpu_is_stopped(cpu
) || qemu_cpu_is_self(cpu
));
235 memset(&vcxt
, 0, sizeof(struct whpx_register_set
));
237 v86
= (env
->eflags
& VM_MASK
);
238 r86
= !(env
->cr
[0] & CR0_PE_MASK
);
240 vcpu
->tpr
= cpu_get_apic_tpr(x86_cpu
->apic_state
);
241 vcpu
->apic_base
= cpu_get_apic_base(x86_cpu
->apic_state
);
245 /* Indexes for first 16 registers match between HV and QEMU definitions */
247 for (idx
= 0; idx
< CPU_NB_REGS
; idx
+= 1) {
248 vcxt
.values
[idx
].Reg64
= (uint64_t)env
->regs
[idx
];
252 /* Same goes for RIP and RFLAGS */
253 assert(whpx_register_names
[idx
] == WHvX64RegisterRip
);
254 vcxt
.values
[idx
++].Reg64
= env
->eip
;
256 assert(whpx_register_names
[idx
] == WHvX64RegisterRflags
);
257 vcxt
.values
[idx
++].Reg64
= env
->eflags
;
259 /* Translate 6+4 segment registers. HV and QEMU order matches */
260 assert(idx
== WHvX64RegisterEs
);
261 for (i
= 0; i
< 6; i
+= 1, idx
+= 1) {
262 vcxt
.values
[idx
].Segment
= whpx_seg_q2h(&env
->segs
[i
], v86
, r86
);
265 assert(idx
== WHvX64RegisterLdtr
);
266 vcxt
.values
[idx
++].Segment
= whpx_seg_q2h(&env
->ldt
, 0, 0);
268 assert(idx
== WHvX64RegisterTr
);
269 vcxt
.values
[idx
++].Segment
= whpx_seg_q2h(&env
->tr
, 0, 0);
271 assert(idx
== WHvX64RegisterIdtr
);
272 vcxt
.values
[idx
].Table
.Base
= env
->idt
.base
;
273 vcxt
.values
[idx
].Table
.Limit
= env
->idt
.limit
;
276 assert(idx
== WHvX64RegisterGdtr
);
277 vcxt
.values
[idx
].Table
.Base
= env
->gdt
.base
;
278 vcxt
.values
[idx
].Table
.Limit
= env
->gdt
.limit
;
281 /* CR0, 2, 3, 4, 8 */
282 assert(whpx_register_names
[idx
] == WHvX64RegisterCr0
);
283 vcxt
.values
[idx
++].Reg64
= env
->cr
[0];
284 assert(whpx_register_names
[idx
] == WHvX64RegisterCr2
);
285 vcxt
.values
[idx
++].Reg64
= env
->cr
[2];
286 assert(whpx_register_names
[idx
] == WHvX64RegisterCr3
);
287 vcxt
.values
[idx
++].Reg64
= env
->cr
[3];
288 assert(whpx_register_names
[idx
] == WHvX64RegisterCr4
);
289 vcxt
.values
[idx
++].Reg64
= env
->cr
[4];
290 assert(whpx_register_names
[idx
] == WHvX64RegisterCr8
);
291 vcxt
.values
[idx
++].Reg64
= vcpu
->tpr
;
293 /* 8 Debug Registers - Skipped */
295 /* 16 XMM registers */
296 assert(whpx_register_names
[idx
] == WHvX64RegisterXmm0
);
298 for (i
= 0; i
< sizeof(env
->xmm_regs
) / sizeof(ZMMReg
); i
+= 1, idx
+= 1) {
299 vcxt
.values
[idx
].Reg128
.Low64
= env
->xmm_regs
[i
].ZMM_Q(0);
300 vcxt
.values
[idx
].Reg128
.High64
= env
->xmm_regs
[i
].ZMM_Q(1);
305 assert(whpx_register_names
[idx
] == WHvX64RegisterFpMmx0
);
306 for (i
= 0; i
< 8; i
+= 1, idx
+= 1) {
307 vcxt
.values
[idx
].Fp
.AsUINT128
.Low64
= env
->fpregs
[i
].mmx
.MMX_Q(0);
308 /* vcxt.values[idx].Fp.AsUINT128.High64 =
309 env->fpregs[i].mmx.MMX_Q(1);
313 /* FP control status register */
314 assert(whpx_register_names
[idx
] == WHvX64RegisterFpControlStatus
);
315 vcxt
.values
[idx
].FpControlStatus
.FpControl
= env
->fpuc
;
316 vcxt
.values
[idx
].FpControlStatus
.FpStatus
=
317 (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
318 vcxt
.values
[idx
].FpControlStatus
.FpTag
= 0;
319 for (i
= 0; i
< 8; ++i
) {
320 vcxt
.values
[idx
].FpControlStatus
.FpTag
|= (!env
->fptags
[i
]) << i
;
322 vcxt
.values
[idx
].FpControlStatus
.Reserved
= 0;
323 vcxt
.values
[idx
].FpControlStatus
.LastFpOp
= env
->fpop
;
324 vcxt
.values
[idx
].FpControlStatus
.LastFpRip
= env
->fpip
;
327 /* XMM control status register */
328 assert(whpx_register_names
[idx
] == WHvX64RegisterXmmControlStatus
);
329 vcxt
.values
[idx
].XmmControlStatus
.LastFpRdp
= 0;
330 vcxt
.values
[idx
].XmmControlStatus
.XmmStatusControl
= env
->mxcsr
;
331 vcxt
.values
[idx
].XmmControlStatus
.XmmStatusControlMask
= 0x0000ffff;
335 assert(whpx_register_names
[idx
] == WHvX64RegisterTsc
);
336 vcxt
.values
[idx
++].Reg64
= env
->tsc
;
337 assert(whpx_register_names
[idx
] == WHvX64RegisterEfer
);
338 vcxt
.values
[idx
++].Reg64
= env
->efer
;
340 assert(whpx_register_names
[idx
] == WHvX64RegisterKernelGsBase
);
341 vcxt
.values
[idx
++].Reg64
= env
->kernelgsbase
;
344 assert(whpx_register_names
[idx
] == WHvX64RegisterApicBase
);
345 vcxt
.values
[idx
++].Reg64
= vcpu
->apic_base
;
347 /* WHvX64RegisterPat - Skipped */
349 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterCs
);
350 vcxt
.values
[idx
++].Reg64
= env
->sysenter_cs
;
351 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEip
);
352 vcxt
.values
[idx
++].Reg64
= env
->sysenter_eip
;
353 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEsp
);
354 vcxt
.values
[idx
++].Reg64
= env
->sysenter_esp
;
355 assert(whpx_register_names
[idx
] == WHvX64RegisterStar
);
356 vcxt
.values
[idx
++].Reg64
= env
->star
;
358 assert(whpx_register_names
[idx
] == WHvX64RegisterLstar
);
359 vcxt
.values
[idx
++].Reg64
= env
->lstar
;
360 assert(whpx_register_names
[idx
] == WHvX64RegisterCstar
);
361 vcxt
.values
[idx
++].Reg64
= env
->cstar
;
362 assert(whpx_register_names
[idx
] == WHvX64RegisterSfmask
);
363 vcxt
.values
[idx
++].Reg64
= env
->fmask
;
366 /* Interrupt / Event Registers - Skipped */
368 assert(idx
== RTL_NUMBER_OF(whpx_register_names
));
370 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
371 whpx
->partition
, cpu
->cpu_index
,
373 RTL_NUMBER_OF(whpx_register_names
),
377 error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
384 static void whpx_get_registers(CPUState
*cpu
)
386 struct whpx_state
*whpx
= &whpx_global
;
387 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
388 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
389 X86CPU
*x86_cpu
= X86_CPU(cpu
);
390 struct whpx_register_set vcxt
;
391 uint64_t tpr
, apic_base
;
397 assert(cpu_is_stopped(cpu
) || qemu_cpu_is_self(cpu
));
399 hr
= whp_dispatch
.WHvGetVirtualProcessorRegisters(
400 whpx
->partition
, cpu
->cpu_index
,
402 RTL_NUMBER_OF(whpx_register_names
),
405 error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
411 /* Indexes for first 16 registers match between HV and QEMU definitions */
413 for (idx
= 0; idx
< CPU_NB_REGS
; idx
+= 1) {
414 env
->regs
[idx
] = vcxt
.values
[idx
].Reg64
;
418 /* Same goes for RIP and RFLAGS */
419 assert(whpx_register_names
[idx
] == WHvX64RegisterRip
);
420 env
->eip
= vcxt
.values
[idx
++].Reg64
;
421 assert(whpx_register_names
[idx
] == WHvX64RegisterRflags
);
422 env
->eflags
= vcxt
.values
[idx
++].Reg64
;
424 /* Translate 6+4 segment registers. HV and QEMU order matches */
425 assert(idx
== WHvX64RegisterEs
);
426 for (i
= 0; i
< 6; i
+= 1, idx
+= 1) {
427 env
->segs
[i
] = whpx_seg_h2q(&vcxt
.values
[idx
].Segment
);
430 assert(idx
== WHvX64RegisterLdtr
);
431 env
->ldt
= whpx_seg_h2q(&vcxt
.values
[idx
++].Segment
);
432 assert(idx
== WHvX64RegisterTr
);
433 env
->tr
= whpx_seg_h2q(&vcxt
.values
[idx
++].Segment
);
434 assert(idx
== WHvX64RegisterIdtr
);
435 env
->idt
.base
= vcxt
.values
[idx
].Table
.Base
;
436 env
->idt
.limit
= vcxt
.values
[idx
].Table
.Limit
;
438 assert(idx
== WHvX64RegisterGdtr
);
439 env
->gdt
.base
= vcxt
.values
[idx
].Table
.Base
;
440 env
->gdt
.limit
= vcxt
.values
[idx
].Table
.Limit
;
443 /* CR0, 2, 3, 4, 8 */
444 assert(whpx_register_names
[idx
] == WHvX64RegisterCr0
);
445 env
->cr
[0] = vcxt
.values
[idx
++].Reg64
;
446 assert(whpx_register_names
[idx
] == WHvX64RegisterCr2
);
447 env
->cr
[2] = vcxt
.values
[idx
++].Reg64
;
448 assert(whpx_register_names
[idx
] == WHvX64RegisterCr3
);
449 env
->cr
[3] = vcxt
.values
[idx
++].Reg64
;
450 assert(whpx_register_names
[idx
] == WHvX64RegisterCr4
);
451 env
->cr
[4] = vcxt
.values
[idx
++].Reg64
;
452 assert(whpx_register_names
[idx
] == WHvX64RegisterCr8
);
453 tpr
= vcxt
.values
[idx
++].Reg64
;
454 if (tpr
!= vcpu
->tpr
) {
456 cpu_set_apic_tpr(x86_cpu
->apic_state
, tpr
);
459 /* 8 Debug Registers - Skipped */
461 /* 16 XMM registers */
462 assert(whpx_register_names
[idx
] == WHvX64RegisterXmm0
);
464 for (i
= 0; i
< sizeof(env
->xmm_regs
) / sizeof(ZMMReg
); i
+= 1, idx
+= 1) {
465 env
->xmm_regs
[i
].ZMM_Q(0) = vcxt
.values
[idx
].Reg128
.Low64
;
466 env
->xmm_regs
[i
].ZMM_Q(1) = vcxt
.values
[idx
].Reg128
.High64
;
471 assert(whpx_register_names
[idx
] == WHvX64RegisterFpMmx0
);
472 for (i
= 0; i
< 8; i
+= 1, idx
+= 1) {
473 env
->fpregs
[i
].mmx
.MMX_Q(0) = vcxt
.values
[idx
].Fp
.AsUINT128
.Low64
;
474 /* env->fpregs[i].mmx.MMX_Q(1) =
475 vcxt.values[idx].Fp.AsUINT128.High64;
479 /* FP control status register */
480 assert(whpx_register_names
[idx
] == WHvX64RegisterFpControlStatus
);
481 env
->fpuc
= vcxt
.values
[idx
].FpControlStatus
.FpControl
;
482 env
->fpstt
= (vcxt
.values
[idx
].FpControlStatus
.FpStatus
>> 11) & 0x7;
483 env
->fpus
= vcxt
.values
[idx
].FpControlStatus
.FpStatus
& ~0x3800;
484 for (i
= 0; i
< 8; ++i
) {
485 env
->fptags
[i
] = !((vcxt
.values
[idx
].FpControlStatus
.FpTag
>> i
) & 1);
487 env
->fpop
= vcxt
.values
[idx
].FpControlStatus
.LastFpOp
;
488 env
->fpip
= vcxt
.values
[idx
].FpControlStatus
.LastFpRip
;
491 /* XMM control status register */
492 assert(whpx_register_names
[idx
] == WHvX64RegisterXmmControlStatus
);
493 env
->mxcsr
= vcxt
.values
[idx
].XmmControlStatus
.XmmStatusControl
;
497 assert(whpx_register_names
[idx
] == WHvX64RegisterTsc
);
498 env
->tsc
= vcxt
.values
[idx
++].Reg64
;
499 assert(whpx_register_names
[idx
] == WHvX64RegisterEfer
);
500 env
->efer
= vcxt
.values
[idx
++].Reg64
;
502 assert(whpx_register_names
[idx
] == WHvX64RegisterKernelGsBase
);
503 env
->kernelgsbase
= vcxt
.values
[idx
++].Reg64
;
506 assert(whpx_register_names
[idx
] == WHvX64RegisterApicBase
);
507 apic_base
= vcxt
.values
[idx
++].Reg64
;
508 if (apic_base
!= vcpu
->apic_base
) {
509 vcpu
->apic_base
= apic_base
;
510 cpu_set_apic_base(x86_cpu
->apic_state
, vcpu
->apic_base
);
513 /* WHvX64RegisterPat - Skipped */
515 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterCs
);
516 env
->sysenter_cs
= vcxt
.values
[idx
++].Reg64
;;
517 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEip
);
518 env
->sysenter_eip
= vcxt
.values
[idx
++].Reg64
;
519 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEsp
);
520 env
->sysenter_esp
= vcxt
.values
[idx
++].Reg64
;
521 assert(whpx_register_names
[idx
] == WHvX64RegisterStar
);
522 env
->star
= vcxt
.values
[idx
++].Reg64
;
524 assert(whpx_register_names
[idx
] == WHvX64RegisterLstar
);
525 env
->lstar
= vcxt
.values
[idx
++].Reg64
;
526 assert(whpx_register_names
[idx
] == WHvX64RegisterCstar
);
527 env
->cstar
= vcxt
.values
[idx
++].Reg64
;
528 assert(whpx_register_names
[idx
] == WHvX64RegisterSfmask
);
529 env
->fmask
= vcxt
.values
[idx
++].Reg64
;
532 /* Interrupt / Event Registers - Skipped */
534 assert(idx
== RTL_NUMBER_OF(whpx_register_names
));
539 static HRESULT CALLBACK
whpx_emu_ioport_callback(
541 WHV_EMULATOR_IO_ACCESS_INFO
*IoAccess
)
543 MemTxAttrs attrs
= { 0 };
544 address_space_rw(&address_space_io
, IoAccess
->Port
, attrs
,
545 (uint8_t *)&IoAccess
->Data
, IoAccess
->AccessSize
,
546 IoAccess
->Direction
);
550 static HRESULT CALLBACK
whpx_emu_mmio_callback(
552 WHV_EMULATOR_MEMORY_ACCESS_INFO
*ma
)
554 cpu_physical_memory_rw(ma
->GpaAddress
, ma
->Data
, ma
->AccessSize
,
559 static HRESULT CALLBACK
whpx_emu_getreg_callback(
561 const WHV_REGISTER_NAME
*RegisterNames
,
562 UINT32 RegisterCount
,
563 WHV_REGISTER_VALUE
*RegisterValues
)
566 struct whpx_state
*whpx
= &whpx_global
;
567 CPUState
*cpu
= (CPUState
*)ctx
;
569 hr
= whp_dispatch
.WHvGetVirtualProcessorRegisters(
570 whpx
->partition
, cpu
->cpu_index
,
571 RegisterNames
, RegisterCount
,
574 error_report("WHPX: Failed to get virtual processor registers,"
581 static HRESULT CALLBACK
whpx_emu_setreg_callback(
583 const WHV_REGISTER_NAME
*RegisterNames
,
584 UINT32 RegisterCount
,
585 const WHV_REGISTER_VALUE
*RegisterValues
)
588 struct whpx_state
*whpx
= &whpx_global
;
589 CPUState
*cpu
= (CPUState
*)ctx
;
591 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
592 whpx
->partition
, cpu
->cpu_index
,
593 RegisterNames
, RegisterCount
,
596 error_report("WHPX: Failed to set virtual processor registers,"
601 * The emulator just successfully wrote the register state. We clear the
602 * dirty state so we avoid the double write on resume of the VP.
604 cpu
->vcpu_dirty
= false;
609 static HRESULT CALLBACK
whpx_emu_translate_callback(
611 WHV_GUEST_VIRTUAL_ADDRESS Gva
,
612 WHV_TRANSLATE_GVA_FLAGS TranslateFlags
,
613 WHV_TRANSLATE_GVA_RESULT_CODE
*TranslationResult
,
614 WHV_GUEST_PHYSICAL_ADDRESS
*Gpa
)
617 struct whpx_state
*whpx
= &whpx_global
;
618 CPUState
*cpu
= (CPUState
*)ctx
;
619 WHV_TRANSLATE_GVA_RESULT res
;
621 hr
= whp_dispatch
.WHvTranslateGva(whpx
->partition
, cpu
->cpu_index
,
622 Gva
, TranslateFlags
, &res
, Gpa
);
624 error_report("WHPX: Failed to translate GVA, hr=%08lx", hr
);
626 *TranslationResult
= res
.ResultCode
;
632 static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks
= {
633 .Size
= sizeof(WHV_EMULATOR_CALLBACKS
),
634 .WHvEmulatorIoPortCallback
= whpx_emu_ioport_callback
,
635 .WHvEmulatorMemoryCallback
= whpx_emu_mmio_callback
,
636 .WHvEmulatorGetVirtualProcessorRegisters
= whpx_emu_getreg_callback
,
637 .WHvEmulatorSetVirtualProcessorRegisters
= whpx_emu_setreg_callback
,
638 .WHvEmulatorTranslateGvaPage
= whpx_emu_translate_callback
,
641 static int whpx_handle_mmio(CPUState
*cpu
, WHV_MEMORY_ACCESS_CONTEXT
*ctx
)
644 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
645 WHV_EMULATOR_STATUS emu_status
;
647 hr
= whp_dispatch
.WHvEmulatorTryMmioEmulation(
649 &vcpu
->exit_ctx
.VpContext
, ctx
,
652 error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr
);
656 if (!emu_status
.EmulationSuccessful
) {
657 error_report("WHPX: Failed to emulate MMIO access with"
658 " EmulatorReturnStatus: %u", emu_status
.AsUINT32
);
665 static int whpx_handle_portio(CPUState
*cpu
,
666 WHV_X64_IO_PORT_ACCESS_CONTEXT
*ctx
)
669 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
670 WHV_EMULATOR_STATUS emu_status
;
672 hr
= whp_dispatch
.WHvEmulatorTryIoEmulation(
674 &vcpu
->exit_ctx
.VpContext
, ctx
,
677 error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr
);
681 if (!emu_status
.EmulationSuccessful
) {
682 error_report("WHPX: Failed to emulate PortIO access with"
683 " EmulatorReturnStatus: %u", emu_status
.AsUINT32
);
690 static int whpx_handle_halt(CPUState
*cpu
)
692 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
695 qemu_mutex_lock_iothread();
696 if (!((cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
697 (env
->eflags
& IF_MASK
)) &&
698 !(cpu
->interrupt_request
& CPU_INTERRUPT_NMI
)) {
699 cpu
->exception_index
= EXCP_HLT
;
703 qemu_mutex_unlock_iothread();
708 static void whpx_vcpu_pre_run(CPUState
*cpu
)
711 struct whpx_state
*whpx
= &whpx_global
;
712 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
713 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
714 X86CPU
*x86_cpu
= X86_CPU(cpu
);
717 WHV_X64_PENDING_INTERRUPTION_REGISTER new_int
;
718 UINT32 reg_count
= 0;
719 WHV_REGISTER_VALUE reg_values
[3];
720 WHV_REGISTER_NAME reg_names
[3];
722 memset(&new_int
, 0, sizeof(new_int
));
723 memset(reg_values
, 0, sizeof(reg_values
));
725 qemu_mutex_lock_iothread();
728 if (!vcpu
->interruption_pending
&&
729 cpu
->interrupt_request
& (CPU_INTERRUPT_NMI
| CPU_INTERRUPT_SMI
)) {
730 if (cpu
->interrupt_request
& CPU_INTERRUPT_NMI
) {
731 cpu
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
732 vcpu
->interruptable
= false;
733 new_int
.InterruptionType
= WHvX64PendingNmi
;
734 new_int
.InterruptionPending
= 1;
735 new_int
.InterruptionVector
= 2;
737 if (cpu
->interrupt_request
& CPU_INTERRUPT_SMI
) {
738 cpu
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
743 * Force the VCPU out of its inner loop to process any INIT requests or
744 * commit pending TPR access.
746 if (cpu
->interrupt_request
& (CPU_INTERRUPT_INIT
| CPU_INTERRUPT_TPR
)) {
747 if ((cpu
->interrupt_request
& CPU_INTERRUPT_INIT
) &&
748 !(env
->hflags
& HF_SMM_MASK
)) {
749 cpu
->exit_request
= 1;
751 if (cpu
->interrupt_request
& CPU_INTERRUPT_TPR
) {
752 cpu
->exit_request
= 1;
756 /* Get pending hard interruption or replay one that was overwritten */
757 if (!vcpu
->interruption_pending
&&
758 vcpu
->interruptable
&& (env
->eflags
& IF_MASK
)) {
759 assert(!new_int
.InterruptionPending
);
760 if (cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) {
761 cpu
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
762 irq
= cpu_get_pic_interrupt(env
);
764 new_int
.InterruptionType
= WHvX64PendingInterrupt
;
765 new_int
.InterruptionPending
= 1;
766 new_int
.InterruptionVector
= irq
;
771 /* Setup interrupt state if new one was prepared */
772 if (new_int
.InterruptionPending
) {
773 reg_values
[reg_count
].PendingInterruption
= new_int
;
774 reg_names
[reg_count
] = WHvRegisterPendingInterruption
;
778 /* Sync the TPR to the CR8 if was modified during the intercept */
779 tpr
= cpu_get_apic_tpr(x86_cpu
->apic_state
);
780 if (tpr
!= vcpu
->tpr
) {
782 reg_values
[reg_count
].Reg64
= tpr
;
783 cpu
->exit_request
= 1;
784 reg_names
[reg_count
] = WHvX64RegisterCr8
;
788 /* Update the state of the interrupt delivery notification */
789 if (!vcpu
->window_registered
&&
790 cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) {
791 reg_values
[reg_count
].DeliverabilityNotifications
.InterruptNotification
793 vcpu
->window_registered
= 1;
794 reg_names
[reg_count
] = WHvX64RegisterDeliverabilityNotifications
;
798 qemu_mutex_unlock_iothread();
801 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
802 whpx
->partition
, cpu
->cpu_index
,
803 reg_names
, reg_count
, reg_values
);
805 error_report("WHPX: Failed to set interrupt state registers,"
813 static void whpx_vcpu_post_run(CPUState
*cpu
)
815 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
816 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
817 X86CPU
*x86_cpu
= X86_CPU(cpu
);
819 env
->eflags
= vcpu
->exit_ctx
.VpContext
.Rflags
;
821 uint64_t tpr
= vcpu
->exit_ctx
.VpContext
.Cr8
;
822 if (vcpu
->tpr
!= tpr
) {
824 qemu_mutex_lock_iothread();
825 cpu_set_apic_tpr(x86_cpu
->apic_state
, vcpu
->tpr
);
826 qemu_mutex_unlock_iothread();
829 vcpu
->interruption_pending
=
830 vcpu
->exit_ctx
.VpContext
.ExecutionState
.InterruptionPending
;
832 vcpu
->interruptable
=
833 !vcpu
->exit_ctx
.VpContext
.ExecutionState
.InterruptShadow
;
838 static void whpx_vcpu_process_async_events(CPUState
*cpu
)
840 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
841 X86CPU
*x86_cpu
= X86_CPU(cpu
);
842 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
844 if ((cpu
->interrupt_request
& CPU_INTERRUPT_INIT
) &&
845 !(env
->hflags
& HF_SMM_MASK
)) {
847 do_cpu_init(x86_cpu
);
848 cpu
->vcpu_dirty
= true;
849 vcpu
->interruptable
= true;
852 if (cpu
->interrupt_request
& CPU_INTERRUPT_POLL
) {
853 cpu
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
854 apic_poll_irq(x86_cpu
->apic_state
);
857 if (((cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
858 (env
->eflags
& IF_MASK
)) ||
859 (cpu
->interrupt_request
& CPU_INTERRUPT_NMI
)) {
863 if (cpu
->interrupt_request
& CPU_INTERRUPT_SIPI
) {
864 if (!cpu
->vcpu_dirty
) {
865 whpx_get_registers(cpu
);
867 do_cpu_sipi(x86_cpu
);
870 if (cpu
->interrupt_request
& CPU_INTERRUPT_TPR
) {
871 cpu
->interrupt_request
&= ~CPU_INTERRUPT_TPR
;
872 if (!cpu
->vcpu_dirty
) {
873 whpx_get_registers(cpu
);
875 apic_handle_tpr_access_report(x86_cpu
->apic_state
, env
->eip
,
876 env
->tpr_access_type
);
882 static int whpx_vcpu_run(CPUState
*cpu
)
885 struct whpx_state
*whpx
= &whpx_global
;
886 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
889 whpx_vcpu_process_async_events(cpu
);
891 cpu
->exception_index
= EXCP_HLT
;
892 atomic_set(&cpu
->exit_request
, false);
896 qemu_mutex_unlock_iothread();
900 if (cpu
->vcpu_dirty
) {
901 whpx_set_registers(cpu
);
902 cpu
->vcpu_dirty
= false;
905 whpx_vcpu_pre_run(cpu
);
907 if (atomic_read(&cpu
->exit_request
)) {
911 hr
= whp_dispatch
.WHvRunVirtualProcessor(
912 whpx
->partition
, cpu
->cpu_index
,
913 &vcpu
->exit_ctx
, sizeof(vcpu
->exit_ctx
));
916 error_report("WHPX: Failed to exec a virtual processor,"
922 whpx_vcpu_post_run(cpu
);
924 switch (vcpu
->exit_ctx
.ExitReason
) {
925 case WHvRunVpExitReasonMemoryAccess
:
926 ret
= whpx_handle_mmio(cpu
, &vcpu
->exit_ctx
.MemoryAccess
);
929 case WHvRunVpExitReasonX64IoPortAccess
:
930 ret
= whpx_handle_portio(cpu
, &vcpu
->exit_ctx
.IoPortAccess
);
933 case WHvRunVpExitReasonX64InterruptWindow
:
934 vcpu
->window_registered
= 0;
938 case WHvRunVpExitReasonX64Halt
:
939 ret
= whpx_handle_halt(cpu
);
942 case WHvRunVpExitReasonCanceled
:
943 cpu
->exception_index
= EXCP_INTERRUPT
;
947 case WHvRunVpExitReasonX64MsrAccess
: {
948 WHV_REGISTER_VALUE reg_values
[3] = {0};
949 WHV_REGISTER_NAME reg_names
[3];
952 reg_names
[0] = WHvX64RegisterRip
;
953 reg_names
[1] = WHvX64RegisterRax
;
954 reg_names
[2] = WHvX64RegisterRdx
;
956 reg_values
[0].Reg64
=
957 vcpu
->exit_ctx
.VpContext
.Rip
+
958 vcpu
->exit_ctx
.VpContext
.InstructionLength
;
961 * For all unsupported MSR access we:
965 reg_count
= vcpu
->exit_ctx
.MsrAccess
.AccessInfo
.IsWrite
?
968 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
971 reg_names
, reg_count
,
975 error_report("WHPX: Failed to set MsrAccess state "
976 " registers, hr=%08lx", hr
);
981 case WHvRunVpExitReasonX64Cpuid
: {
982 WHV_REGISTER_VALUE reg_values
[5];
983 WHV_REGISTER_NAME reg_names
[5];
984 UINT32 reg_count
= 5;
985 UINT64 rip
, rax
, rcx
, rdx
, rbx
;
987 memset(reg_values
, 0, sizeof(reg_values
));
989 rip
= vcpu
->exit_ctx
.VpContext
.Rip
+
990 vcpu
->exit_ctx
.VpContext
.InstructionLength
;
991 switch (vcpu
->exit_ctx
.CpuidAccess
.Rax
) {
993 rax
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRax
;
994 /* Advertise that we are running on a hypervisor */
996 vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRcx
|
997 CPUID_EXT_HYPERVISOR
;
999 rdx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRdx
;
1000 rbx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRbx
;
1003 rax
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRax
;
1004 /* Remove any support of OSVW */
1006 vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRcx
&
1009 rdx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRdx
;
1010 rbx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRbx
;
1013 rax
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRax
;
1014 rcx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRcx
;
1015 rdx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRdx
;
1016 rbx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRbx
;
1019 reg_names
[0] = WHvX64RegisterRip
;
1020 reg_names
[1] = WHvX64RegisterRax
;
1021 reg_names
[2] = WHvX64RegisterRcx
;
1022 reg_names
[3] = WHvX64RegisterRdx
;
1023 reg_names
[4] = WHvX64RegisterRbx
;
1025 reg_values
[0].Reg64
= rip
;
1026 reg_values
[1].Reg64
= rax
;
1027 reg_values
[2].Reg64
= rcx
;
1028 reg_values
[3].Reg64
= rdx
;
1029 reg_values
[4].Reg64
= rbx
;
1031 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
1032 whpx
->partition
, cpu
->cpu_index
,
1038 error_report("WHPX: Failed to set CpuidAccess state registers,"
1044 case WHvRunVpExitReasonNone
:
1045 case WHvRunVpExitReasonUnrecoverableException
:
1046 case WHvRunVpExitReasonInvalidVpRegisterValue
:
1047 case WHvRunVpExitReasonUnsupportedFeature
:
1048 case WHvRunVpExitReasonException
:
1050 error_report("WHPX: Unexpected VP exit code %d",
1051 vcpu
->exit_ctx
.ExitReason
);
1052 whpx_get_registers(cpu
);
1053 qemu_mutex_lock_iothread();
1054 qemu_system_guest_panicked(cpu_get_crash_info(cpu
));
1055 qemu_mutex_unlock_iothread();
1062 qemu_mutex_lock_iothread();
1065 atomic_set(&cpu
->exit_request
, false);
1070 static void do_whpx_cpu_synchronize_state(CPUState
*cpu
, run_on_cpu_data arg
)
1072 whpx_get_registers(cpu
);
1073 cpu
->vcpu_dirty
= true;
1076 static void do_whpx_cpu_synchronize_post_reset(CPUState
*cpu
,
1077 run_on_cpu_data arg
)
1079 whpx_set_registers(cpu
);
1080 cpu
->vcpu_dirty
= false;
1083 static void do_whpx_cpu_synchronize_post_init(CPUState
*cpu
,
1084 run_on_cpu_data arg
)
1086 whpx_set_registers(cpu
);
1087 cpu
->vcpu_dirty
= false;
1090 static void do_whpx_cpu_synchronize_pre_loadvm(CPUState
*cpu
,
1091 run_on_cpu_data arg
)
1093 cpu
->vcpu_dirty
= true;
1100 void whpx_cpu_synchronize_state(CPUState
*cpu
)
1102 if (!cpu
->vcpu_dirty
) {
1103 run_on_cpu(cpu
, do_whpx_cpu_synchronize_state
, RUN_ON_CPU_NULL
);
1107 void whpx_cpu_synchronize_post_reset(CPUState
*cpu
)
1109 run_on_cpu(cpu
, do_whpx_cpu_synchronize_post_reset
, RUN_ON_CPU_NULL
);
1112 void whpx_cpu_synchronize_post_init(CPUState
*cpu
)
1114 run_on_cpu(cpu
, do_whpx_cpu_synchronize_post_init
, RUN_ON_CPU_NULL
);
1117 void whpx_cpu_synchronize_pre_loadvm(CPUState
*cpu
)
1119 run_on_cpu(cpu
, do_whpx_cpu_synchronize_pre_loadvm
, RUN_ON_CPU_NULL
);
/* Blocker registered once to forbid migration while WHPX is in use. */
static Error *whpx_migration_blocker;
1128 int whpx_init_vcpu(CPUState
*cpu
)
1131 struct whpx_state
*whpx
= &whpx_global
;
1132 struct whpx_vcpu
*vcpu
;
1133 Error
*local_error
= NULL
;
1135 /* Add migration blockers for all unsupported features of the
1136 * Windows Hypervisor Platform
1138 if (whpx_migration_blocker
== NULL
) {
1139 error_setg(&whpx_migration_blocker
,
1140 "State blocked due to non-migratable CPUID feature support,"
1141 "dirty memory tracking support, and XSAVE/XRSTOR support");
1143 (void)migrate_add_blocker(whpx_migration_blocker
, &local_error
);
1145 error_report_err(local_error
);
1146 migrate_del_blocker(whpx_migration_blocker
);
1147 error_free(whpx_migration_blocker
);
1152 vcpu
= g_malloc0(sizeof(struct whpx_vcpu
));
1155 error_report("WHPX: Failed to allocte VCPU context.");
1159 hr
= whp_dispatch
.WHvEmulatorCreateEmulator(
1160 &whpx_emu_callbacks
,
1163 error_report("WHPX: Failed to setup instruction completion support,"
1169 hr
= whp_dispatch
.WHvCreateVirtualProcessor(
1170 whpx
->partition
, cpu
->cpu_index
, 0);
1172 error_report("WHPX: Failed to create a virtual processor,"
1174 whp_dispatch
.WHvEmulatorDestroyEmulator(vcpu
->emulator
);
1179 vcpu
->interruptable
= true;
1181 cpu
->vcpu_dirty
= true;
1182 cpu
->hax_vcpu
= (struct hax_vcpu_state
*)vcpu
;
1187 int whpx_vcpu_exec(CPUState
*cpu
)
1193 if (cpu
->exception_index
>= EXCP_INTERRUPT
) {
1194 ret
= cpu
->exception_index
;
1195 cpu
->exception_index
= -1;
1199 fatal
= whpx_vcpu_run(cpu
);
1202 error_report("WHPX: Failed to exec a virtual processor");
1210 void whpx_destroy_vcpu(CPUState
*cpu
)
1212 struct whpx_state
*whpx
= &whpx_global
;
1213 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
1215 whp_dispatch
.WHvDeleteVirtualProcessor(whpx
->partition
, cpu
->cpu_index
);
1216 whp_dispatch
.WHvEmulatorDestroyEmulator(vcpu
->emulator
);
1217 g_free(cpu
->hax_vcpu
);
1221 void whpx_vcpu_kick(CPUState
*cpu
)
1223 struct whpx_state
*whpx
= &whpx_global
;
1224 whp_dispatch
.WHvCancelRunVirtualProcessor(
1225 whpx
->partition
, cpu
->cpu_index
, 0);
1232 static void whpx_update_mapping(hwaddr start_pa
, ram_addr_t size
,
1233 void *host_va
, int add
, int rom
,
1236 struct whpx_state
*whpx
= &whpx_global
;
1241 printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
1242 (void*)start_pa, (void*)size, host_va,
1243 (rom ? "ROM" : "RAM"), name);
1245 printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n",
1246 (void*)start_pa, (void*)size, host_va, name);
1251 hr
= whp_dispatch
.WHvMapGpaRange(whpx
->partition
,
1255 (WHvMapGpaRangeFlagRead
|
1256 WHvMapGpaRangeFlagExecute
|
1257 (rom
? 0 : WHvMapGpaRangeFlagWrite
)));
1259 hr
= whp_dispatch
.WHvUnmapGpaRange(whpx
->partition
,
1265 error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
1266 " Host:%p, hr=%08lx",
1267 (add
? "MAP" : "UNMAP"), name
,
1268 (void *)(uintptr_t)start_pa
, (void *)size
, host_va
, hr
);
1272 static void whpx_process_section(MemoryRegionSection
*section
, int add
)
1274 MemoryRegion
*mr
= section
->mr
;
1275 hwaddr start_pa
= section
->offset_within_address_space
;
1276 ram_addr_t size
= int128_get64(section
->size
);
1280 if (!memory_region_is_ram(mr
)) {
1284 delta
= qemu_real_host_page_size
- (start_pa
& ~qemu_real_host_page_mask
);
1285 delta
&= ~qemu_real_host_page_mask
;
1291 size
&= qemu_real_host_page_mask
;
1292 if (!size
|| (start_pa
& ~qemu_real_host_page_mask
)) {
1296 host_va
= (uintptr_t)memory_region_get_ram_ptr(mr
)
1297 + section
->offset_within_region
+ delta
;
1299 whpx_update_mapping(start_pa
, size
, (void *)(uintptr_t)host_va
, add
,
1300 memory_region_is_rom(mr
), mr
->name
);
1303 static void whpx_region_add(MemoryListener
*listener
,
1304 MemoryRegionSection
*section
)
1306 memory_region_ref(section
->mr
);
1307 whpx_process_section(section
, 1);
1310 static void whpx_region_del(MemoryListener
*listener
,
1311 MemoryRegionSection
*section
)
1313 whpx_process_section(section
, 0);
1314 memory_region_unref(section
->mr
);
1317 static void whpx_transaction_begin(MemoryListener
*listener
)
1321 static void whpx_transaction_commit(MemoryListener
*listener
)
/*
 * Dirty-log sync hook: no fine-grained dirty-page tracking is wired up
 * here, so conservatively mark the entire RAM section dirty.
 */
static void whpx_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (!memory_region_is_ram(mr)) {
        return;
    }

    memory_region_set_dirty(mr, 0, int128_get64(section->size));
}
/* Hooks that keep the WHPX partition's GPA mappings in sync with the
 * QEMU memory map. */
static MemoryListener whpx_memory_listener = {
    .begin = whpx_transaction_begin,
    .commit = whpx_transaction_commit,
    .region_add = whpx_region_add,
    .region_del = whpx_region_del,
    .log_sync = whpx_log_sync,
};
/* Attach the WHPX memory listener to the system address space. */
static void whpx_memory_init(void)
{
    memory_listener_register(&whpx_memory_listener, &address_space_memory);
}
/*
 * cpu_interrupt() backend for WHPX: record the pending interrupt mask
 * and, when raised from another thread, kick the vcpu so it exits the
 * hypervisor and notices the request.
 */
static void whpx_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
1364 static int whpx_accel_init(MachineState
*ms
)
1366 struct whpx_state
*whpx
;
1369 WHV_CAPABILITY whpx_cap
;
1370 UINT32 whpx_cap_size
;
1371 WHV_PARTITION_PROPERTY prop
;
1373 whpx
= &whpx_global
;
1375 if (!init_whp_dispatch()) {
1380 memset(whpx
, 0, sizeof(struct whpx_state
));
1381 whpx
->mem_quota
= ms
->ram_size
;
1383 hr
= whp_dispatch
.WHvGetCapability(
1384 WHvCapabilityCodeHypervisorPresent
, &whpx_cap
,
1385 sizeof(whpx_cap
), &whpx_cap_size
);
1386 if (FAILED(hr
) || !whpx_cap
.HypervisorPresent
) {
1387 error_report("WHPX: No accelerator found, hr=%08lx", hr
);
1392 hr
= whp_dispatch
.WHvCreatePartition(&whpx
->partition
);
1394 error_report("WHPX: Failed to create partition, hr=%08lx", hr
);
1399 memset(&prop
, 0, sizeof(WHV_PARTITION_PROPERTY
));
1400 prop
.ProcessorCount
= smp_cpus
;
1401 hr
= whp_dispatch
.WHvSetPartitionProperty(
1403 WHvPartitionPropertyCodeProcessorCount
,
1405 sizeof(WHV_PARTITION_PROPERTY
));
1408 error_report("WHPX: Failed to set partition core count to %d,"
1409 " hr=%08lx", smp_cores
, hr
);
1414 memset(&prop
, 0, sizeof(WHV_PARTITION_PROPERTY
));
1415 prop
.ExtendedVmExits
.X64MsrExit
= 1;
1416 prop
.ExtendedVmExits
.X64CpuidExit
= 1;
1417 hr
= whp_dispatch
.WHvSetPartitionProperty(
1419 WHvPartitionPropertyCodeExtendedVmExits
,
1421 sizeof(WHV_PARTITION_PROPERTY
));
1424 error_report("WHPX: Failed to enable partition extended X64MsrExit and"
1425 " X64CpuidExit hr=%08lx", hr
);
1430 UINT32 cpuidExitList
[] = {1, 0x80000001};
1431 hr
= whp_dispatch
.WHvSetPartitionProperty(
1433 WHvPartitionPropertyCodeCpuidExitList
,
1435 RTL_NUMBER_OF(cpuidExitList
) * sizeof(UINT32
));
1438 error_report("WHPX: Failed to set partition CpuidExitList hr=%08lx",
1444 hr
= whp_dispatch
.WHvSetupPartition(whpx
->partition
);
1446 error_report("WHPX: Failed to setup partition, hr=%08lx", hr
);
1453 cpu_interrupt_handler
= whpx_handle_interrupt
;
1455 printf("Windows Hypervisor Platform accelerator is operational\n");
1460 if (NULL
!= whpx
->partition
) {
1461 whp_dispatch
.WHvDeletePartition(whpx
->partition
);
1462 whpx
->partition
= NULL
;
/* Query helper for the rest of QEMU: non-zero when WHPX is the active
 * accelerator. */
int whpx_enabled(void)
{
    return whpx_allowed;
}
/* QOM class init: wire the WHPX machine-init hook and the "-accel whpx"
 * allowed flag into the AccelClass. */
static void whpx_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->init_machine = whpx_accel_init;
    ac->allowed = &whpx_allowed;
}
/* QOM type descriptor for the WHPX accelerator. */
static const TypeInfo whpx_accel_type = {
    .name = ACCEL_CLASS_NAME("whpx"),
    .parent = TYPE_ACCEL,
    .class_init = whpx_accel_class_init,
};
/* Register the WHPX accelerator type with the QOM type system. */
static void whpx_type_init(void)
{
    type_register_static(&whpx_accel_type);
}
/*
 * Lazily load WinHvPlatform.dll and WinHvEmulation.dll and resolve
 * every WHv* entry point into the whp_dispatch table.  Runtime loading
 * (rather than link-time) lets the same QEMU binary run on Windows
 * versions without the Hypervisor Platform installed.
 *
 * Returns true on success (idempotent: subsequent calls are no-ops);
 * on failure both libraries are released and false is returned.
 */
bool init_whp_dispatch(void)
{
    const char *lib_name;
    HMODULE hLib;

    if (whp_dispatch_initialized) {
        return true;
    }

    /*
     * Resolve one function from the currently-selected library (hLib);
     * bail out to the cleanup path if the symbol is missing.
     */
    #define WHP_LOAD_FIELD(return_type, function_name, signature) \
        whp_dispatch.function_name = \
            (function_name ## _t)GetProcAddress(hLib, #function_name); \
        if (!whp_dispatch.function_name) { \
            error_report("Could not load function %s from library %s.", \
                         #function_name, lib_name); \
            goto error; \
        } \

    lib_name = "WinHvPlatform.dll";
    hWinHvPlatform = LoadLibrary(lib_name);
    if (!hWinHvPlatform) {
        error_report("Could not load library %s.", lib_name);
        goto error;
    }
    hLib = hWinHvPlatform;
    LIST_WINHVPLATFORM_FUNCTIONS(WHP_LOAD_FIELD)

    lib_name = "WinHvEmulation.dll";
    hWinHvEmulation = LoadLibrary(lib_name);
    if (!hWinHvEmulation) {
        error_report("Could not load library %s.", lib_name);
        goto error;
    }
    hLib = hWinHvEmulation;
    LIST_WINHVEMULATION_FUNCTIONS(WHP_LOAD_FIELD)

    whp_dispatch_initialized = true;

    return true;

error:

    if (hWinHvPlatform) {
        FreeLibrary(hWinHvPlatform);
    }
    if (hWinHvEmulation) {
        FreeLibrary(hWinHvEmulation);
    }
    return false;
}
/* Hook the WHPX type registration into QEMU's module init sequence. */
type_init(whpx_type_init);