/*
 * QEMU Windows Hypervisor Platform accelerator (WHPX)
 *
 * Copyright Microsoft Corp. 2017
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
11 #include "qemu/osdep.h"
13 #include "exec/address-spaces.h"
14 #include "exec/ioport.h"
15 #include "qemu-common.h"
16 #include "sysemu/accel.h"
17 #include "sysemu/whpx.h"
18 #include "sysemu/sysemu.h"
19 #include "sysemu/cpus.h"
20 #include "qemu/main-loop.h"
21 #include "hw/boards.h"
22 #include "qemu/error-report.h"
23 #include "qemu/queue.h"
24 #include "qapi/error.h"
25 #include "migration/blocker.h"
26 #include "whp-dispatch.h"
28 #include <WinHvPlatform.h>
29 #include <WinHvEmulation.h>
33 WHV_PARTITION_HANDLE partition
;
36 static const WHV_REGISTER_NAME whpx_register_names
[] = {
38 /* X64 General purpose registers */
58 /* X64 Segment registers */
68 /* X64 Table registers */
72 /* X64 Control Registers */
79 /* X64 Debug Registers */
89 /* X64 Floating Point and Vector Registers */
106 WHvX64RegisterFpMmx0
,
107 WHvX64RegisterFpMmx1
,
108 WHvX64RegisterFpMmx2
,
109 WHvX64RegisterFpMmx3
,
110 WHvX64RegisterFpMmx4
,
111 WHvX64RegisterFpMmx5
,
112 WHvX64RegisterFpMmx6
,
113 WHvX64RegisterFpMmx7
,
114 WHvX64RegisterFpControlStatus
,
115 WHvX64RegisterXmmControlStatus
,
121 WHvX64RegisterKernelGsBase
,
123 WHvX64RegisterApicBase
,
124 /* WHvX64RegisterPat, */
125 WHvX64RegisterSysenterCs
,
126 WHvX64RegisterSysenterEip
,
127 WHvX64RegisterSysenterEsp
,
132 WHvX64RegisterSfmask
,
135 /* Interrupt / Event Registers */
137 * WHvRegisterPendingInterruption,
138 * WHvRegisterInterruptState,
139 * WHvRegisterPendingEvent0,
140 * WHvRegisterPendingEvent1
141 * WHvX64RegisterDeliverabilityNotifications,
145 struct whpx_register_set
{
146 WHV_REGISTER_VALUE values
[RTL_NUMBER_OF(whpx_register_names
)];
150 WHV_EMULATOR_HANDLE emulator
;
151 bool window_registered
;
155 bool interruption_pending
;
157 /* Must be the last field as it may have a tail */
158 WHV_RUN_VP_EXIT_CONTEXT exit_ctx
;
161 static bool whpx_allowed
;
162 static bool whp_dispatch_initialized
;
163 static HMODULE hWinHvPlatform
, hWinHvEmulation
;
165 struct whpx_state whpx_global
;
166 struct WHPDispatch whp_dispatch
;
173 static struct whpx_vcpu
*get_whpx_vcpu(CPUState
*cpu
)
175 return (struct whpx_vcpu
*)cpu
->hax_vcpu
;
178 static WHV_X64_SEGMENT_REGISTER
whpx_seg_q2h(const SegmentCache
*qs
, int v86
,
181 WHV_X64_SEGMENT_REGISTER hs
;
182 unsigned flags
= qs
->flags
;
185 hs
.Limit
= qs
->limit
;
186 hs
.Selector
= qs
->selector
;
192 hs
.DescriptorPrivilegeLevel
= 3;
193 hs
.NonSystemSegment
= 1;
196 hs
.Attributes
= (flags
>> DESC_TYPE_SHIFT
);
199 /* hs.Base &= 0xfffff; */
206 static SegmentCache
whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER
*hs
)
211 qs
.limit
= hs
->Limit
;
212 qs
.selector
= hs
->Selector
;
214 qs
.flags
= ((uint32_t)hs
->Attributes
) << DESC_TYPE_SHIFT
;
219 static void whpx_set_registers(CPUState
*cpu
)
221 struct whpx_state
*whpx
= &whpx_global
;
222 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
223 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
224 X86CPU
*x86_cpu
= X86_CPU(cpu
);
225 struct whpx_register_set vcxt
;
232 assert(cpu_is_stopped(cpu
) || qemu_cpu_is_self(cpu
));
234 memset(&vcxt
, 0, sizeof(struct whpx_register_set
));
236 v86
= (env
->eflags
& VM_MASK
);
237 r86
= !(env
->cr
[0] & CR0_PE_MASK
);
239 vcpu
->tpr
= cpu_get_apic_tpr(x86_cpu
->apic_state
);
240 vcpu
->apic_base
= cpu_get_apic_base(x86_cpu
->apic_state
);
244 /* Indexes for first 16 registers match between HV and QEMU definitions */
246 for (idx
= 0; idx
< CPU_NB_REGS
; idx
+= 1) {
247 vcxt
.values
[idx
].Reg64
= (uint64_t)env
->regs
[idx
];
251 /* Same goes for RIP and RFLAGS */
252 assert(whpx_register_names
[idx
] == WHvX64RegisterRip
);
253 vcxt
.values
[idx
++].Reg64
= env
->eip
;
255 assert(whpx_register_names
[idx
] == WHvX64RegisterRflags
);
256 vcxt
.values
[idx
++].Reg64
= env
->eflags
;
258 /* Translate 6+4 segment registers. HV and QEMU order matches */
259 assert(idx
== WHvX64RegisterEs
);
260 for (i
= 0; i
< 6; i
+= 1, idx
+= 1) {
261 vcxt
.values
[idx
].Segment
= whpx_seg_q2h(&env
->segs
[i
], v86
, r86
);
264 assert(idx
== WHvX64RegisterLdtr
);
265 vcxt
.values
[idx
++].Segment
= whpx_seg_q2h(&env
->ldt
, 0, 0);
267 assert(idx
== WHvX64RegisterTr
);
268 vcxt
.values
[idx
++].Segment
= whpx_seg_q2h(&env
->tr
, 0, 0);
270 assert(idx
== WHvX64RegisterIdtr
);
271 vcxt
.values
[idx
].Table
.Base
= env
->idt
.base
;
272 vcxt
.values
[idx
].Table
.Limit
= env
->idt
.limit
;
275 assert(idx
== WHvX64RegisterGdtr
);
276 vcxt
.values
[idx
].Table
.Base
= env
->gdt
.base
;
277 vcxt
.values
[idx
].Table
.Limit
= env
->gdt
.limit
;
280 /* CR0, 2, 3, 4, 8 */
281 assert(whpx_register_names
[idx
] == WHvX64RegisterCr0
);
282 vcxt
.values
[idx
++].Reg64
= env
->cr
[0];
283 assert(whpx_register_names
[idx
] == WHvX64RegisterCr2
);
284 vcxt
.values
[idx
++].Reg64
= env
->cr
[2];
285 assert(whpx_register_names
[idx
] == WHvX64RegisterCr3
);
286 vcxt
.values
[idx
++].Reg64
= env
->cr
[3];
287 assert(whpx_register_names
[idx
] == WHvX64RegisterCr4
);
288 vcxt
.values
[idx
++].Reg64
= env
->cr
[4];
289 assert(whpx_register_names
[idx
] == WHvX64RegisterCr8
);
290 vcxt
.values
[idx
++].Reg64
= vcpu
->tpr
;
292 /* 8 Debug Registers - Skipped */
294 /* 16 XMM registers */
295 assert(whpx_register_names
[idx
] == WHvX64RegisterXmm0
);
297 for (i
= 0; i
< sizeof(env
->xmm_regs
) / sizeof(ZMMReg
); i
+= 1, idx
+= 1) {
298 vcxt
.values
[idx
].Reg128
.Low64
= env
->xmm_regs
[i
].ZMM_Q(0);
299 vcxt
.values
[idx
].Reg128
.High64
= env
->xmm_regs
[i
].ZMM_Q(1);
304 assert(whpx_register_names
[idx
] == WHvX64RegisterFpMmx0
);
305 for (i
= 0; i
< 8; i
+= 1, idx
+= 1) {
306 vcxt
.values
[idx
].Fp
.AsUINT128
.Low64
= env
->fpregs
[i
].mmx
.MMX_Q(0);
307 /* vcxt.values[idx].Fp.AsUINT128.High64 =
308 env->fpregs[i].mmx.MMX_Q(1);
312 /* FP control status register */
313 assert(whpx_register_names
[idx
] == WHvX64RegisterFpControlStatus
);
314 vcxt
.values
[idx
].FpControlStatus
.FpControl
= env
->fpuc
;
315 vcxt
.values
[idx
].FpControlStatus
.FpStatus
=
316 (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
317 vcxt
.values
[idx
].FpControlStatus
.FpTag
= 0;
318 for (i
= 0; i
< 8; ++i
) {
319 vcxt
.values
[idx
].FpControlStatus
.FpTag
|= (!env
->fptags
[i
]) << i
;
321 vcxt
.values
[idx
].FpControlStatus
.Reserved
= 0;
322 vcxt
.values
[idx
].FpControlStatus
.LastFpOp
= env
->fpop
;
323 vcxt
.values
[idx
].FpControlStatus
.LastFpRip
= env
->fpip
;
326 /* XMM control status register */
327 assert(whpx_register_names
[idx
] == WHvX64RegisterXmmControlStatus
);
328 vcxt
.values
[idx
].XmmControlStatus
.LastFpRdp
= 0;
329 vcxt
.values
[idx
].XmmControlStatus
.XmmStatusControl
= env
->mxcsr
;
330 vcxt
.values
[idx
].XmmControlStatus
.XmmStatusControlMask
= 0x0000ffff;
334 assert(whpx_register_names
[idx
] == WHvX64RegisterTsc
);
335 vcxt
.values
[idx
++].Reg64
= env
->tsc
;
336 assert(whpx_register_names
[idx
] == WHvX64RegisterEfer
);
337 vcxt
.values
[idx
++].Reg64
= env
->efer
;
339 assert(whpx_register_names
[idx
] == WHvX64RegisterKernelGsBase
);
340 vcxt
.values
[idx
++].Reg64
= env
->kernelgsbase
;
343 assert(whpx_register_names
[idx
] == WHvX64RegisterApicBase
);
344 vcxt
.values
[idx
++].Reg64
= vcpu
->apic_base
;
346 /* WHvX64RegisterPat - Skipped */
348 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterCs
);
349 vcxt
.values
[idx
++].Reg64
= env
->sysenter_cs
;
350 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEip
);
351 vcxt
.values
[idx
++].Reg64
= env
->sysenter_eip
;
352 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEsp
);
353 vcxt
.values
[idx
++].Reg64
= env
->sysenter_esp
;
354 assert(whpx_register_names
[idx
] == WHvX64RegisterStar
);
355 vcxt
.values
[idx
++].Reg64
= env
->star
;
357 assert(whpx_register_names
[idx
] == WHvX64RegisterLstar
);
358 vcxt
.values
[idx
++].Reg64
= env
->lstar
;
359 assert(whpx_register_names
[idx
] == WHvX64RegisterCstar
);
360 vcxt
.values
[idx
++].Reg64
= env
->cstar
;
361 assert(whpx_register_names
[idx
] == WHvX64RegisterSfmask
);
362 vcxt
.values
[idx
++].Reg64
= env
->fmask
;
365 /* Interrupt / Event Registers - Skipped */
367 assert(idx
== RTL_NUMBER_OF(whpx_register_names
));
369 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
370 whpx
->partition
, cpu
->cpu_index
,
372 RTL_NUMBER_OF(whpx_register_names
),
376 error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
383 static void whpx_get_registers(CPUState
*cpu
)
385 struct whpx_state
*whpx
= &whpx_global
;
386 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
387 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
388 X86CPU
*x86_cpu
= X86_CPU(cpu
);
389 struct whpx_register_set vcxt
;
390 uint64_t tpr
, apic_base
;
396 assert(cpu_is_stopped(cpu
) || qemu_cpu_is_self(cpu
));
398 hr
= whp_dispatch
.WHvGetVirtualProcessorRegisters(
399 whpx
->partition
, cpu
->cpu_index
,
401 RTL_NUMBER_OF(whpx_register_names
),
404 error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
410 /* Indexes for first 16 registers match between HV and QEMU definitions */
412 for (idx
= 0; idx
< CPU_NB_REGS
; idx
+= 1) {
413 env
->regs
[idx
] = vcxt
.values
[idx
].Reg64
;
417 /* Same goes for RIP and RFLAGS */
418 assert(whpx_register_names
[idx
] == WHvX64RegisterRip
);
419 env
->eip
= vcxt
.values
[idx
++].Reg64
;
420 assert(whpx_register_names
[idx
] == WHvX64RegisterRflags
);
421 env
->eflags
= vcxt
.values
[idx
++].Reg64
;
423 /* Translate 6+4 segment registers. HV and QEMU order matches */
424 assert(idx
== WHvX64RegisterEs
);
425 for (i
= 0; i
< 6; i
+= 1, idx
+= 1) {
426 env
->segs
[i
] = whpx_seg_h2q(&vcxt
.values
[idx
].Segment
);
429 assert(idx
== WHvX64RegisterLdtr
);
430 env
->ldt
= whpx_seg_h2q(&vcxt
.values
[idx
++].Segment
);
431 assert(idx
== WHvX64RegisterTr
);
432 env
->tr
= whpx_seg_h2q(&vcxt
.values
[idx
++].Segment
);
433 assert(idx
== WHvX64RegisterIdtr
);
434 env
->idt
.base
= vcxt
.values
[idx
].Table
.Base
;
435 env
->idt
.limit
= vcxt
.values
[idx
].Table
.Limit
;
437 assert(idx
== WHvX64RegisterGdtr
);
438 env
->gdt
.base
= vcxt
.values
[idx
].Table
.Base
;
439 env
->gdt
.limit
= vcxt
.values
[idx
].Table
.Limit
;
442 /* CR0, 2, 3, 4, 8 */
443 assert(whpx_register_names
[idx
] == WHvX64RegisterCr0
);
444 env
->cr
[0] = vcxt
.values
[idx
++].Reg64
;
445 assert(whpx_register_names
[idx
] == WHvX64RegisterCr2
);
446 env
->cr
[2] = vcxt
.values
[idx
++].Reg64
;
447 assert(whpx_register_names
[idx
] == WHvX64RegisterCr3
);
448 env
->cr
[3] = vcxt
.values
[idx
++].Reg64
;
449 assert(whpx_register_names
[idx
] == WHvX64RegisterCr4
);
450 env
->cr
[4] = vcxt
.values
[idx
++].Reg64
;
451 assert(whpx_register_names
[idx
] == WHvX64RegisterCr8
);
452 tpr
= vcxt
.values
[idx
++].Reg64
;
453 if (tpr
!= vcpu
->tpr
) {
455 cpu_set_apic_tpr(x86_cpu
->apic_state
, tpr
);
458 /* 8 Debug Registers - Skipped */
460 /* 16 XMM registers */
461 assert(whpx_register_names
[idx
] == WHvX64RegisterXmm0
);
463 for (i
= 0; i
< sizeof(env
->xmm_regs
) / sizeof(ZMMReg
); i
+= 1, idx
+= 1) {
464 env
->xmm_regs
[i
].ZMM_Q(0) = vcxt
.values
[idx
].Reg128
.Low64
;
465 env
->xmm_regs
[i
].ZMM_Q(1) = vcxt
.values
[idx
].Reg128
.High64
;
470 assert(whpx_register_names
[idx
] == WHvX64RegisterFpMmx0
);
471 for (i
= 0; i
< 8; i
+= 1, idx
+= 1) {
472 env
->fpregs
[i
].mmx
.MMX_Q(0) = vcxt
.values
[idx
].Fp
.AsUINT128
.Low64
;
473 /* env->fpregs[i].mmx.MMX_Q(1) =
474 vcxt.values[idx].Fp.AsUINT128.High64;
478 /* FP control status register */
479 assert(whpx_register_names
[idx
] == WHvX64RegisterFpControlStatus
);
480 env
->fpuc
= vcxt
.values
[idx
].FpControlStatus
.FpControl
;
481 env
->fpstt
= (vcxt
.values
[idx
].FpControlStatus
.FpStatus
>> 11) & 0x7;
482 env
->fpus
= vcxt
.values
[idx
].FpControlStatus
.FpStatus
& ~0x3800;
483 for (i
= 0; i
< 8; ++i
) {
484 env
->fptags
[i
] = !((vcxt
.values
[idx
].FpControlStatus
.FpTag
>> i
) & 1);
486 env
->fpop
= vcxt
.values
[idx
].FpControlStatus
.LastFpOp
;
487 env
->fpip
= vcxt
.values
[idx
].FpControlStatus
.LastFpRip
;
490 /* XMM control status register */
491 assert(whpx_register_names
[idx
] == WHvX64RegisterXmmControlStatus
);
492 env
->mxcsr
= vcxt
.values
[idx
].XmmControlStatus
.XmmStatusControl
;
496 assert(whpx_register_names
[idx
] == WHvX64RegisterTsc
);
497 env
->tsc
= vcxt
.values
[idx
++].Reg64
;
498 assert(whpx_register_names
[idx
] == WHvX64RegisterEfer
);
499 env
->efer
= vcxt
.values
[idx
++].Reg64
;
501 assert(whpx_register_names
[idx
] == WHvX64RegisterKernelGsBase
);
502 env
->kernelgsbase
= vcxt
.values
[idx
++].Reg64
;
505 assert(whpx_register_names
[idx
] == WHvX64RegisterApicBase
);
506 apic_base
= vcxt
.values
[idx
++].Reg64
;
507 if (apic_base
!= vcpu
->apic_base
) {
508 vcpu
->apic_base
= apic_base
;
509 cpu_set_apic_base(x86_cpu
->apic_state
, vcpu
->apic_base
);
512 /* WHvX64RegisterPat - Skipped */
514 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterCs
);
515 env
->sysenter_cs
= vcxt
.values
[idx
++].Reg64
;;
516 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEip
);
517 env
->sysenter_eip
= vcxt
.values
[idx
++].Reg64
;
518 assert(whpx_register_names
[idx
] == WHvX64RegisterSysenterEsp
);
519 env
->sysenter_esp
= vcxt
.values
[idx
++].Reg64
;
520 assert(whpx_register_names
[idx
] == WHvX64RegisterStar
);
521 env
->star
= vcxt
.values
[idx
++].Reg64
;
523 assert(whpx_register_names
[idx
] == WHvX64RegisterLstar
);
524 env
->lstar
= vcxt
.values
[idx
++].Reg64
;
525 assert(whpx_register_names
[idx
] == WHvX64RegisterCstar
);
526 env
->cstar
= vcxt
.values
[idx
++].Reg64
;
527 assert(whpx_register_names
[idx
] == WHvX64RegisterSfmask
);
528 env
->fmask
= vcxt
.values
[idx
++].Reg64
;
531 /* Interrupt / Event Registers - Skipped */
533 assert(idx
== RTL_NUMBER_OF(whpx_register_names
));
538 static HRESULT CALLBACK
whpx_emu_ioport_callback(
540 WHV_EMULATOR_IO_ACCESS_INFO
*IoAccess
)
542 MemTxAttrs attrs
= { 0 };
543 address_space_rw(&address_space_io
, IoAccess
->Port
, attrs
,
544 (uint8_t *)&IoAccess
->Data
, IoAccess
->AccessSize
,
545 IoAccess
->Direction
);
549 static HRESULT CALLBACK
whpx_emu_mmio_callback(
551 WHV_EMULATOR_MEMORY_ACCESS_INFO
*ma
)
553 cpu_physical_memory_rw(ma
->GpaAddress
, ma
->Data
, ma
->AccessSize
,
558 static HRESULT CALLBACK
whpx_emu_getreg_callback(
560 const WHV_REGISTER_NAME
*RegisterNames
,
561 UINT32 RegisterCount
,
562 WHV_REGISTER_VALUE
*RegisterValues
)
565 struct whpx_state
*whpx
= &whpx_global
;
566 CPUState
*cpu
= (CPUState
*)ctx
;
568 hr
= whp_dispatch
.WHvGetVirtualProcessorRegisters(
569 whpx
->partition
, cpu
->cpu_index
,
570 RegisterNames
, RegisterCount
,
573 error_report("WHPX: Failed to get virtual processor registers,"
580 static HRESULT CALLBACK
whpx_emu_setreg_callback(
582 const WHV_REGISTER_NAME
*RegisterNames
,
583 UINT32 RegisterCount
,
584 const WHV_REGISTER_VALUE
*RegisterValues
)
587 struct whpx_state
*whpx
= &whpx_global
;
588 CPUState
*cpu
= (CPUState
*)ctx
;
590 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
591 whpx
->partition
, cpu
->cpu_index
,
592 RegisterNames
, RegisterCount
,
595 error_report("WHPX: Failed to set virtual processor registers,"
600 * The emulator just successfully wrote the register state. We clear the
601 * dirty state so we avoid the double write on resume of the VP.
603 cpu
->vcpu_dirty
= false;
608 static HRESULT CALLBACK
whpx_emu_translate_callback(
610 WHV_GUEST_VIRTUAL_ADDRESS Gva
,
611 WHV_TRANSLATE_GVA_FLAGS TranslateFlags
,
612 WHV_TRANSLATE_GVA_RESULT_CODE
*TranslationResult
,
613 WHV_GUEST_PHYSICAL_ADDRESS
*Gpa
)
616 struct whpx_state
*whpx
= &whpx_global
;
617 CPUState
*cpu
= (CPUState
*)ctx
;
618 WHV_TRANSLATE_GVA_RESULT res
;
620 hr
= whp_dispatch
.WHvTranslateGva(whpx
->partition
, cpu
->cpu_index
,
621 Gva
, TranslateFlags
, &res
, Gpa
);
623 error_report("WHPX: Failed to translate GVA, hr=%08lx", hr
);
625 *TranslationResult
= res
.ResultCode
;
631 static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks
= {
632 .Size
= sizeof(WHV_EMULATOR_CALLBACKS
),
633 .WHvEmulatorIoPortCallback
= whpx_emu_ioport_callback
,
634 .WHvEmulatorMemoryCallback
= whpx_emu_mmio_callback
,
635 .WHvEmulatorGetVirtualProcessorRegisters
= whpx_emu_getreg_callback
,
636 .WHvEmulatorSetVirtualProcessorRegisters
= whpx_emu_setreg_callback
,
637 .WHvEmulatorTranslateGvaPage
= whpx_emu_translate_callback
,
640 static int whpx_handle_mmio(CPUState
*cpu
, WHV_MEMORY_ACCESS_CONTEXT
*ctx
)
643 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
644 WHV_EMULATOR_STATUS emu_status
;
646 hr
= whp_dispatch
.WHvEmulatorTryMmioEmulation(
648 &vcpu
->exit_ctx
.VpContext
, ctx
,
651 error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr
);
655 if (!emu_status
.EmulationSuccessful
) {
656 error_report("WHPX: Failed to emulate MMIO access with"
657 " EmulatorReturnStatus: %u", emu_status
.AsUINT32
);
664 static int whpx_handle_portio(CPUState
*cpu
,
665 WHV_X64_IO_PORT_ACCESS_CONTEXT
*ctx
)
668 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
669 WHV_EMULATOR_STATUS emu_status
;
671 hr
= whp_dispatch
.WHvEmulatorTryIoEmulation(
673 &vcpu
->exit_ctx
.VpContext
, ctx
,
676 error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr
);
680 if (!emu_status
.EmulationSuccessful
) {
681 error_report("WHPX: Failed to emulate PortIO access with"
682 " EmulatorReturnStatus: %u", emu_status
.AsUINT32
);
689 static int whpx_handle_halt(CPUState
*cpu
)
691 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
694 qemu_mutex_lock_iothread();
695 if (!((cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
696 (env
->eflags
& IF_MASK
)) &&
697 !(cpu
->interrupt_request
& CPU_INTERRUPT_NMI
)) {
698 cpu
->exception_index
= EXCP_HLT
;
702 qemu_mutex_unlock_iothread();
707 static void whpx_vcpu_pre_run(CPUState
*cpu
)
710 struct whpx_state
*whpx
= &whpx_global
;
711 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
712 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
713 X86CPU
*x86_cpu
= X86_CPU(cpu
);
716 WHV_X64_PENDING_INTERRUPTION_REGISTER new_int
;
717 UINT32 reg_count
= 0;
718 WHV_REGISTER_VALUE reg_values
[3];
719 WHV_REGISTER_NAME reg_names
[3];
721 memset(&new_int
, 0, sizeof(new_int
));
722 memset(reg_values
, 0, sizeof(reg_values
));
724 qemu_mutex_lock_iothread();
727 if (!vcpu
->interruption_pending
&&
728 cpu
->interrupt_request
& (CPU_INTERRUPT_NMI
| CPU_INTERRUPT_SMI
)) {
729 if (cpu
->interrupt_request
& CPU_INTERRUPT_NMI
) {
730 cpu
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
731 vcpu
->interruptable
= false;
732 new_int
.InterruptionType
= WHvX64PendingNmi
;
733 new_int
.InterruptionPending
= 1;
734 new_int
.InterruptionVector
= 2;
736 if (cpu
->interrupt_request
& CPU_INTERRUPT_SMI
) {
737 cpu
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
742 * Force the VCPU out of its inner loop to process any INIT requests or
743 * commit pending TPR access.
745 if (cpu
->interrupt_request
& (CPU_INTERRUPT_INIT
| CPU_INTERRUPT_TPR
)) {
746 if ((cpu
->interrupt_request
& CPU_INTERRUPT_INIT
) &&
747 !(env
->hflags
& HF_SMM_MASK
)) {
748 cpu
->exit_request
= 1;
750 if (cpu
->interrupt_request
& CPU_INTERRUPT_TPR
) {
751 cpu
->exit_request
= 1;
755 /* Get pending hard interruption or replay one that was overwritten */
756 if (!vcpu
->interruption_pending
&&
757 vcpu
->interruptable
&& (env
->eflags
& IF_MASK
)) {
758 assert(!new_int
.InterruptionPending
);
759 if (cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) {
760 cpu
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
761 irq
= cpu_get_pic_interrupt(env
);
763 new_int
.InterruptionType
= WHvX64PendingInterrupt
;
764 new_int
.InterruptionPending
= 1;
765 new_int
.InterruptionVector
= irq
;
770 /* Setup interrupt state if new one was prepared */
771 if (new_int
.InterruptionPending
) {
772 reg_values
[reg_count
].PendingInterruption
= new_int
;
773 reg_names
[reg_count
] = WHvRegisterPendingInterruption
;
777 /* Sync the TPR to the CR8 if was modified during the intercept */
778 tpr
= cpu_get_apic_tpr(x86_cpu
->apic_state
);
779 if (tpr
!= vcpu
->tpr
) {
781 reg_values
[reg_count
].Reg64
= tpr
;
782 cpu
->exit_request
= 1;
783 reg_names
[reg_count
] = WHvX64RegisterCr8
;
787 /* Update the state of the interrupt delivery notification */
788 if (!vcpu
->window_registered
&&
789 cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) {
790 reg_values
[reg_count
].DeliverabilityNotifications
.InterruptNotification
792 vcpu
->window_registered
= 1;
793 reg_names
[reg_count
] = WHvX64RegisterDeliverabilityNotifications
;
797 qemu_mutex_unlock_iothread();
800 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
801 whpx
->partition
, cpu
->cpu_index
,
802 reg_names
, reg_count
, reg_values
);
804 error_report("WHPX: Failed to set interrupt state registers,"
812 static void whpx_vcpu_post_run(CPUState
*cpu
)
814 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
815 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
816 X86CPU
*x86_cpu
= X86_CPU(cpu
);
818 env
->eflags
= vcpu
->exit_ctx
.VpContext
.Rflags
;
820 uint64_t tpr
= vcpu
->exit_ctx
.VpContext
.Cr8
;
821 if (vcpu
->tpr
!= tpr
) {
823 qemu_mutex_lock_iothread();
824 cpu_set_apic_tpr(x86_cpu
->apic_state
, vcpu
->tpr
);
825 qemu_mutex_unlock_iothread();
828 vcpu
->interruption_pending
=
829 vcpu
->exit_ctx
.VpContext
.ExecutionState
.InterruptionPending
;
831 vcpu
->interruptable
=
832 !vcpu
->exit_ctx
.VpContext
.ExecutionState
.InterruptShadow
;
837 static void whpx_vcpu_process_async_events(CPUState
*cpu
)
839 struct CPUX86State
*env
= (CPUArchState
*)(cpu
->env_ptr
);
840 X86CPU
*x86_cpu
= X86_CPU(cpu
);
841 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
843 if ((cpu
->interrupt_request
& CPU_INTERRUPT_INIT
) &&
844 !(env
->hflags
& HF_SMM_MASK
)) {
846 do_cpu_init(x86_cpu
);
847 cpu
->vcpu_dirty
= true;
848 vcpu
->interruptable
= true;
851 if (cpu
->interrupt_request
& CPU_INTERRUPT_POLL
) {
852 cpu
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
853 apic_poll_irq(x86_cpu
->apic_state
);
856 if (((cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) &&
857 (env
->eflags
& IF_MASK
)) ||
858 (cpu
->interrupt_request
& CPU_INTERRUPT_NMI
)) {
862 if (cpu
->interrupt_request
& CPU_INTERRUPT_SIPI
) {
863 if (!cpu
->vcpu_dirty
) {
864 whpx_get_registers(cpu
);
866 do_cpu_sipi(x86_cpu
);
869 if (cpu
->interrupt_request
& CPU_INTERRUPT_TPR
) {
870 cpu
->interrupt_request
&= ~CPU_INTERRUPT_TPR
;
871 if (!cpu
->vcpu_dirty
) {
872 whpx_get_registers(cpu
);
874 apic_handle_tpr_access_report(x86_cpu
->apic_state
, env
->eip
,
875 env
->tpr_access_type
);
881 static int whpx_vcpu_run(CPUState
*cpu
)
884 struct whpx_state
*whpx
= &whpx_global
;
885 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
888 whpx_vcpu_process_async_events(cpu
);
890 cpu
->exception_index
= EXCP_HLT
;
891 atomic_set(&cpu
->exit_request
, false);
895 qemu_mutex_unlock_iothread();
899 if (cpu
->vcpu_dirty
) {
900 whpx_set_registers(cpu
);
901 cpu
->vcpu_dirty
= false;
904 whpx_vcpu_pre_run(cpu
);
906 if (atomic_read(&cpu
->exit_request
)) {
910 hr
= whp_dispatch
.WHvRunVirtualProcessor(
911 whpx
->partition
, cpu
->cpu_index
,
912 &vcpu
->exit_ctx
, sizeof(vcpu
->exit_ctx
));
915 error_report("WHPX: Failed to exec a virtual processor,"
921 whpx_vcpu_post_run(cpu
);
923 switch (vcpu
->exit_ctx
.ExitReason
) {
924 case WHvRunVpExitReasonMemoryAccess
:
925 ret
= whpx_handle_mmio(cpu
, &vcpu
->exit_ctx
.MemoryAccess
);
928 case WHvRunVpExitReasonX64IoPortAccess
:
929 ret
= whpx_handle_portio(cpu
, &vcpu
->exit_ctx
.IoPortAccess
);
932 case WHvRunVpExitReasonX64InterruptWindow
:
933 vcpu
->window_registered
= 0;
937 case WHvRunVpExitReasonX64Halt
:
938 ret
= whpx_handle_halt(cpu
);
941 case WHvRunVpExitReasonCanceled
:
942 cpu
->exception_index
= EXCP_INTERRUPT
;
946 case WHvRunVpExitReasonX64MsrAccess
: {
947 WHV_REGISTER_VALUE reg_values
[3] = {0};
948 WHV_REGISTER_NAME reg_names
[3];
951 reg_names
[0] = WHvX64RegisterRip
;
952 reg_names
[1] = WHvX64RegisterRax
;
953 reg_names
[2] = WHvX64RegisterRdx
;
955 reg_values
[0].Reg64
=
956 vcpu
->exit_ctx
.VpContext
.Rip
+
957 vcpu
->exit_ctx
.VpContext
.InstructionLength
;
960 * For all unsupported MSR access we:
964 reg_count
= vcpu
->exit_ctx
.MsrAccess
.AccessInfo
.IsWrite
?
967 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
970 reg_names
, reg_count
,
974 error_report("WHPX: Failed to set MsrAccess state "
975 " registers, hr=%08lx", hr
);
980 case WHvRunVpExitReasonX64Cpuid
: {
981 WHV_REGISTER_VALUE reg_values
[5];
982 WHV_REGISTER_NAME reg_names
[5];
983 UINT32 reg_count
= 5;
984 UINT64 rip
, rax
, rcx
, rdx
, rbx
;
986 memset(reg_values
, 0, sizeof(reg_values
));
988 rip
= vcpu
->exit_ctx
.VpContext
.Rip
+
989 vcpu
->exit_ctx
.VpContext
.InstructionLength
;
990 switch (vcpu
->exit_ctx
.CpuidAccess
.Rax
) {
992 rax
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRax
;
993 /* Advertise that we are running on a hypervisor */
995 vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRcx
|
996 CPUID_EXT_HYPERVISOR
;
998 rdx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRdx
;
999 rbx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRbx
;
1002 rax
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRax
;
1003 /* Remove any support of OSVW */
1005 vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRcx
&
1008 rdx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRdx
;
1009 rbx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRbx
;
1012 rax
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRax
;
1013 rcx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRcx
;
1014 rdx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRdx
;
1015 rbx
= vcpu
->exit_ctx
.CpuidAccess
.DefaultResultRbx
;
1018 reg_names
[0] = WHvX64RegisterRip
;
1019 reg_names
[1] = WHvX64RegisterRax
;
1020 reg_names
[2] = WHvX64RegisterRcx
;
1021 reg_names
[3] = WHvX64RegisterRdx
;
1022 reg_names
[4] = WHvX64RegisterRbx
;
1024 reg_values
[0].Reg64
= rip
;
1025 reg_values
[1].Reg64
= rax
;
1026 reg_values
[2].Reg64
= rcx
;
1027 reg_values
[3].Reg64
= rdx
;
1028 reg_values
[4].Reg64
= rbx
;
1030 hr
= whp_dispatch
.WHvSetVirtualProcessorRegisters(
1031 whpx
->partition
, cpu
->cpu_index
,
1037 error_report("WHPX: Failed to set CpuidAccess state registers,"
1043 case WHvRunVpExitReasonNone
:
1044 case WHvRunVpExitReasonUnrecoverableException
:
1045 case WHvRunVpExitReasonInvalidVpRegisterValue
:
1046 case WHvRunVpExitReasonUnsupportedFeature
:
1047 case WHvRunVpExitReasonException
:
1049 error_report("WHPX: Unexpected VP exit code %d",
1050 vcpu
->exit_ctx
.ExitReason
);
1051 whpx_get_registers(cpu
);
1052 qemu_mutex_lock_iothread();
1053 qemu_system_guest_panicked(cpu_get_crash_info(cpu
));
1054 qemu_mutex_unlock_iothread();
1061 qemu_mutex_lock_iothread();
1064 atomic_set(&cpu
->exit_request
, false);
1069 static void do_whpx_cpu_synchronize_state(CPUState
*cpu
, run_on_cpu_data arg
)
1071 whpx_get_registers(cpu
);
1072 cpu
->vcpu_dirty
= true;
1075 static void do_whpx_cpu_synchronize_post_reset(CPUState
*cpu
,
1076 run_on_cpu_data arg
)
1078 whpx_set_registers(cpu
);
1079 cpu
->vcpu_dirty
= false;
1082 static void do_whpx_cpu_synchronize_post_init(CPUState
*cpu
,
1083 run_on_cpu_data arg
)
1085 whpx_set_registers(cpu
);
1086 cpu
->vcpu_dirty
= false;
1089 static void do_whpx_cpu_synchronize_pre_loadvm(CPUState
*cpu
,
1090 run_on_cpu_data arg
)
1092 cpu
->vcpu_dirty
= true;
1099 void whpx_cpu_synchronize_state(CPUState
*cpu
)
1101 if (!cpu
->vcpu_dirty
) {
1102 run_on_cpu(cpu
, do_whpx_cpu_synchronize_state
, RUN_ON_CPU_NULL
);
1106 void whpx_cpu_synchronize_post_reset(CPUState
*cpu
)
1108 run_on_cpu(cpu
, do_whpx_cpu_synchronize_post_reset
, RUN_ON_CPU_NULL
);
1111 void whpx_cpu_synchronize_post_init(CPUState
*cpu
)
1113 run_on_cpu(cpu
, do_whpx_cpu_synchronize_post_init
, RUN_ON_CPU_NULL
);
1116 void whpx_cpu_synchronize_pre_loadvm(CPUState
*cpu
)
1118 run_on_cpu(cpu
, do_whpx_cpu_synchronize_pre_loadvm
, RUN_ON_CPU_NULL
);
1125 static Error
*whpx_migration_blocker
;
1127 int whpx_init_vcpu(CPUState
*cpu
)
1130 struct whpx_state
*whpx
= &whpx_global
;
1131 struct whpx_vcpu
*vcpu
;
1132 Error
*local_error
= NULL
;
1134 /* Add migration blockers for all unsupported features of the
1135 * Windows Hypervisor Platform
1137 if (whpx_migration_blocker
== NULL
) {
1138 error_setg(&whpx_migration_blocker
,
1139 "State blocked due to non-migratable CPUID feature support,"
1140 "dirty memory tracking support, and XSAVE/XRSTOR support");
1142 (void)migrate_add_blocker(whpx_migration_blocker
, &local_error
);
1144 error_report_err(local_error
);
1145 migrate_del_blocker(whpx_migration_blocker
);
1146 error_free(whpx_migration_blocker
);
1151 vcpu
= g_malloc0(sizeof(struct whpx_vcpu
));
1154 error_report("WHPX: Failed to allocte VCPU context.");
1158 hr
= whp_dispatch
.WHvEmulatorCreateEmulator(
1159 &whpx_emu_callbacks
,
1162 error_report("WHPX: Failed to setup instruction completion support,"
1168 hr
= whp_dispatch
.WHvCreateVirtualProcessor(
1169 whpx
->partition
, cpu
->cpu_index
, 0);
1171 error_report("WHPX: Failed to create a virtual processor,"
1173 whp_dispatch
.WHvEmulatorDestroyEmulator(vcpu
->emulator
);
1178 vcpu
->interruptable
= true;
1180 cpu
->vcpu_dirty
= true;
1181 cpu
->hax_vcpu
= (struct hax_vcpu_state
*)vcpu
;
1186 int whpx_vcpu_exec(CPUState
*cpu
)
1192 if (cpu
->exception_index
>= EXCP_INTERRUPT
) {
1193 ret
= cpu
->exception_index
;
1194 cpu
->exception_index
= -1;
1198 fatal
= whpx_vcpu_run(cpu
);
1201 error_report("WHPX: Failed to exec a virtual processor");
1209 void whpx_destroy_vcpu(CPUState
*cpu
)
1211 struct whpx_state
*whpx
= &whpx_global
;
1212 struct whpx_vcpu
*vcpu
= get_whpx_vcpu(cpu
);
1214 whp_dispatch
.WHvDeleteVirtualProcessor(whpx
->partition
, cpu
->cpu_index
);
1215 whp_dispatch
.WHvEmulatorDestroyEmulator(vcpu
->emulator
);
1216 g_free(cpu
->hax_vcpu
);
1220 void whpx_vcpu_kick(CPUState
*cpu
)
1222 struct whpx_state
*whpx
= &whpx_global
;
1223 whp_dispatch
.WHvCancelRunVirtualProcessor(
1224 whpx
->partition
, cpu
->cpu_index
, 0);
1231 static void whpx_update_mapping(hwaddr start_pa
, ram_addr_t size
,
1232 void *host_va
, int add
, int rom
,
1235 struct whpx_state
*whpx
= &whpx_global
;
1240 printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
1241 (void*)start_pa, (void*)size, host_va,
1242 (rom ? "ROM" : "RAM"), name);
1244 printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n",
1245 (void*)start_pa, (void*)size, host_va, name);
1250 hr
= whp_dispatch
.WHvMapGpaRange(whpx
->partition
,
1254 (WHvMapGpaRangeFlagRead
|
1255 WHvMapGpaRangeFlagExecute
|
1256 (rom
? 0 : WHvMapGpaRangeFlagWrite
)));
1258 hr
= whp_dispatch
.WHvUnmapGpaRange(whpx
->partition
,
1264 error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
1265 " Host:%p, hr=%08lx",
1266 (add
? "MAP" : "UNMAP"), name
,
1267 (void *)(uintptr_t)start_pa
, (void *)size
, host_va
, hr
);
1271 static void whpx_process_section(MemoryRegionSection
*section
, int add
)
1273 MemoryRegion
*mr
= section
->mr
;
1274 hwaddr start_pa
= section
->offset_within_address_space
;
1275 ram_addr_t size
= int128_get64(section
->size
);
1279 if (!memory_region_is_ram(mr
)) {
1283 delta
= qemu_real_host_page_size
- (start_pa
& ~qemu_real_host_page_mask
);
1284 delta
&= ~qemu_real_host_page_mask
;
1290 size
&= qemu_real_host_page_mask
;
1291 if (!size
|| (start_pa
& ~qemu_real_host_page_mask
)) {
1295 host_va
= (uintptr_t)memory_region_get_ram_ptr(mr
)
1296 + section
->offset_within_region
+ delta
;
1298 whpx_update_mapping(start_pa
, size
, (void *)(uintptr_t)host_va
, add
,
1299 memory_region_is_rom(mr
), mr
->name
);
1302 static void whpx_region_add(MemoryListener
*listener
,
1303 MemoryRegionSection
*section
)
1305 memory_region_ref(section
->mr
);
1306 whpx_process_section(section
, 1);
1309 static void whpx_region_del(MemoryListener
*listener
,
1310 MemoryRegionSection
*section
)
1312 whpx_process_section(section
, 0);
1313 memory_region_unref(section
->mr
);
1316 static void whpx_transaction_begin(MemoryListener
*listener
)
1320 static void whpx_transaction_commit(MemoryListener
*listener
)
1324 static void whpx_log_sync(MemoryListener
*listener
,
1325 MemoryRegionSection
*section
)
1327 MemoryRegion
*mr
= section
->mr
;
1329 if (!memory_region_is_ram(mr
)) {
1333 memory_region_set_dirty(mr
, 0, int128_get64(section
->size
));
1336 static MemoryListener whpx_memory_listener
= {
1337 .begin
= whpx_transaction_begin
,
1338 .commit
= whpx_transaction_commit
,
1339 .region_add
= whpx_region_add
,
1340 .region_del
= whpx_region_del
,
1341 .log_sync
= whpx_log_sync
,
1345 static void whpx_memory_init(void)
1347 memory_listener_register(&whpx_memory_listener
, &address_space_memory
);
1350 static void whpx_handle_interrupt(CPUState
*cpu
, int mask
)
1352 cpu
->interrupt_request
|= mask
;
1354 if (!qemu_cpu_is_self(cpu
)) {
1363 static int whpx_accel_init(MachineState
*ms
)
1365 struct whpx_state
*whpx
;
1368 WHV_CAPABILITY whpx_cap
;
1369 UINT32 whpx_cap_size
;
1370 WHV_PARTITION_PROPERTY prop
;
1372 whpx
= &whpx_global
;
1374 if (!init_whp_dispatch()) {
1379 memset(whpx
, 0, sizeof(struct whpx_state
));
1380 whpx
->mem_quota
= ms
->ram_size
;
1382 hr
= whp_dispatch
.WHvGetCapability(
1383 WHvCapabilityCodeHypervisorPresent
, &whpx_cap
,
1384 sizeof(whpx_cap
), &whpx_cap_size
);
1385 if (FAILED(hr
) || !whpx_cap
.HypervisorPresent
) {
1386 error_report("WHPX: No accelerator found, hr=%08lx", hr
);
1391 hr
= whp_dispatch
.WHvCreatePartition(&whpx
->partition
);
1393 error_report("WHPX: Failed to create partition, hr=%08lx", hr
);
1398 memset(&prop
, 0, sizeof(WHV_PARTITION_PROPERTY
));
1399 prop
.ProcessorCount
= smp_cpus
;
1400 hr
= whp_dispatch
.WHvSetPartitionProperty(
1402 WHvPartitionPropertyCodeProcessorCount
,
1404 sizeof(WHV_PARTITION_PROPERTY
));
1407 error_report("WHPX: Failed to set partition core count to %d,"
1408 " hr=%08lx", smp_cores
, hr
);
1413 memset(&prop
, 0, sizeof(WHV_PARTITION_PROPERTY
));
1414 prop
.ExtendedVmExits
.X64MsrExit
= 1;
1415 prop
.ExtendedVmExits
.X64CpuidExit
= 1;
1416 hr
= whp_dispatch
.WHvSetPartitionProperty(
1418 WHvPartitionPropertyCodeExtendedVmExits
,
1420 sizeof(WHV_PARTITION_PROPERTY
));
1423 error_report("WHPX: Failed to enable partition extended X64MsrExit and"
1424 " X64CpuidExit hr=%08lx", hr
);
1429 UINT32 cpuidExitList
[] = {1, 0x80000001};
1430 hr
= whp_dispatch
.WHvSetPartitionProperty(
1432 WHvPartitionPropertyCodeCpuidExitList
,
1434 RTL_NUMBER_OF(cpuidExitList
) * sizeof(UINT32
));
1437 error_report("WHPX: Failed to set partition CpuidExitList hr=%08lx",
1443 hr
= whp_dispatch
.WHvSetupPartition(whpx
->partition
);
1445 error_report("WHPX: Failed to setup partition, hr=%08lx", hr
);
1452 cpu_interrupt_handler
= whpx_handle_interrupt
;
1454 printf("Windows Hypervisor Platform accelerator is operational\n");
1459 if (NULL
!= whpx
->partition
) {
1460 whp_dispatch
.WHvDeletePartition(whpx
->partition
);
1461 whpx
->partition
= NULL
;
1468 int whpx_enabled(void)
1470 return whpx_allowed
;
1473 static void whpx_accel_class_init(ObjectClass
*oc
, void *data
)
1475 AccelClass
*ac
= ACCEL_CLASS(oc
);
1477 ac
->init_machine
= whpx_accel_init
;
1478 ac
->allowed
= &whpx_allowed
;
1481 static const TypeInfo whpx_accel_type
= {
1482 .name
= ACCEL_CLASS_NAME("whpx"),
1483 .parent
= TYPE_ACCEL
,
1484 .class_init
= whpx_accel_class_init
,
1487 static void whpx_type_init(void)
1489 type_register_static(&whpx_accel_type
);
1492 bool init_whp_dispatch(void)
1494 const char *lib_name
;
1497 if (whp_dispatch_initialized
) {
1501 #define WHP_LOAD_FIELD(return_type, function_name, signature) \
1502 whp_dispatch.function_name = \
1503 (function_name ## _t)GetProcAddress(hLib, #function_name); \
1504 if (!whp_dispatch.function_name) { \
1505 error_report("Could not load function %s from library %s.", \
1506 #function_name, lib_name); \
1510 lib_name = "WinHvPlatform.dll";
1511 hWinHvPlatform
= LoadLibrary(lib_name
);
1512 if (!hWinHvPlatform
) {
1513 error_report("Could not load library %s.", lib_name
);
1516 hLib
= hWinHvPlatform
;
1517 LIST_WINHVPLATFORM_FUNCTIONS(WHP_LOAD_FIELD
)
1519 lib_name
= "WinHvEmulation.dll";
1520 hWinHvEmulation
= LoadLibrary(lib_name
);
1521 if (!hWinHvEmulation
) {
1522 error_report("Could not load library %s.", lib_name
);
1525 hLib
= hWinHvEmulation
;
1526 LIST_WINHVEMULATION_FUNCTIONS(WHP_LOAD_FIELD
)
1528 whp_dispatch_initialized
= true;
1533 if (hWinHvPlatform
) {
1534 FreeLibrary(hWinHvPlatform
);
1536 if (hWinHvEmulation
) {
1537 FreeLibrary(hWinHvEmulation
);
/* Register the WHPX accelerator type at module load time. */
type_init(whpx_type_init);
);