target/arm: Use vector infrastructure for aa64 add/sub/logic
[qemu/ar7.git] / target / i386 / whpx-all.c
blob0015b27509ad22811ead820486802b683b27e78f
1 /*
2 * QEMU Windows Hypervisor Platform accelerator (WHPX)
4 * Copyright Microsoft Corp. 2017
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
9 */
11 #include "qemu/osdep.h"
12 #include "cpu.h"
13 #include "exec/address-spaces.h"
14 #include "exec/exec-all.h"
15 #include "exec/ioport.h"
16 #include "qemu-common.h"
17 #include "strings.h"
18 #include "sysemu/accel.h"
19 #include "sysemu/whpx.h"
20 #include "sysemu/sysemu.h"
21 #include "sysemu/cpus.h"
22 #include "qemu/main-loop.h"
23 #include "hw/boards.h"
24 #include "qemu/error-report.h"
25 #include "qemu/queue.h"
26 #include "qapi/error.h"
27 #include "migration/blocker.h"
29 #include <winhvplatform.h>
30 #include <winhvemulation.h>
/* Global WHPX accelerator state: one partition handle per VM. */
struct whpx_state {
    uint64_t mem_quota;
    WHV_PARTITION_HANDLE partition;   /* handle for the whole VM partition */
    uint32_t exit_ctx_size;           /* size of WHV_RUN_VP_EXIT_CONTEXT incl. tail */
};

/*
 * Register name table shared by whpx_set_registers()/whpx_get_registers().
 * The order is load-bearing: both functions walk an index through this table
 * in lock-step (checked by asserts there), so entries must not be reordered
 * without updating those functions.
 */
static const WHV_REGISTER_NAME whpx_register_names[] = {

    /* X64 General purpose registers */
    WHvX64RegisterRax,
    WHvX64RegisterRcx,
    WHvX64RegisterRdx,
    WHvX64RegisterRbx,
    WHvX64RegisterRsp,
    WHvX64RegisterRbp,
    WHvX64RegisterRsi,
    WHvX64RegisterRdi,
    WHvX64RegisterR8,
    WHvX64RegisterR9,
    WHvX64RegisterR10,
    WHvX64RegisterR11,
    WHvX64RegisterR12,
    WHvX64RegisterR13,
    WHvX64RegisterR14,
    WHvX64RegisterR15,
    WHvX64RegisterRip,
    WHvX64RegisterRflags,

    /* X64 Segment registers */
    WHvX64RegisterEs,
    WHvX64RegisterCs,
    WHvX64RegisterSs,
    WHvX64RegisterDs,
    WHvX64RegisterFs,
    WHvX64RegisterGs,
    WHvX64RegisterLdtr,
    WHvX64RegisterTr,

    /* X64 Table registers */
    WHvX64RegisterIdtr,
    WHvX64RegisterGdtr,

    /* X64 Control Registers */
    WHvX64RegisterCr0,
    WHvX64RegisterCr2,
    WHvX64RegisterCr3,
    WHvX64RegisterCr4,
    WHvX64RegisterCr8,

    /* X64 Debug Registers */
    /*
     * WHvX64RegisterDr0,
     * WHvX64RegisterDr1,
     * WHvX64RegisterDr2,
     * WHvX64RegisterDr3,
     * WHvX64RegisterDr6,
     * WHvX64RegisterDr7,
     */

    /* X64 Floating Point and Vector Registers */
    WHvX64RegisterXmm0,
    WHvX64RegisterXmm1,
    WHvX64RegisterXmm2,
    WHvX64RegisterXmm3,
    WHvX64RegisterXmm4,
    WHvX64RegisterXmm5,
    WHvX64RegisterXmm6,
    WHvX64RegisterXmm7,
    WHvX64RegisterXmm8,
    WHvX64RegisterXmm9,
    WHvX64RegisterXmm10,
    WHvX64RegisterXmm11,
    WHvX64RegisterXmm12,
    WHvX64RegisterXmm13,
    WHvX64RegisterXmm14,
    WHvX64RegisterXmm15,
    WHvX64RegisterFpMmx0,
    WHvX64RegisterFpMmx1,
    WHvX64RegisterFpMmx2,
    WHvX64RegisterFpMmx3,
    WHvX64RegisterFpMmx4,
    WHvX64RegisterFpMmx5,
    WHvX64RegisterFpMmx6,
    WHvX64RegisterFpMmx7,
    WHvX64RegisterFpControlStatus,
    WHvX64RegisterXmmControlStatus,

    /* X64 MSRs */
    WHvX64RegisterTsc,
    WHvX64RegisterEfer,
#ifdef TARGET_X86_64
    WHvX64RegisterKernelGsBase,
#endif
    WHvX64RegisterApicBase,
    /* WHvX64RegisterPat, */
    WHvX64RegisterSysenterCs,
    WHvX64RegisterSysenterEip,
    WHvX64RegisterSysenterEsp,
    WHvX64RegisterStar,
#ifdef TARGET_X86_64
    WHvX64RegisterLstar,
    WHvX64RegisterCstar,
    WHvX64RegisterSfmask,
#endif

    /* Interrupt / Event Registers */
    /*
     * WHvRegisterPendingInterruption,
     * WHvRegisterInterruptState,
     * WHvRegisterPendingEvent0,
     * WHvRegisterPendingEvent1
     * WHvX64RegisterDeliverabilityNotifications,
     */
};

/* Value array sized to match whpx_register_names entry-for-entry. */
struct whpx_register_set {
    WHV_REGISTER_VALUE values[RTL_NUMBER_OF(whpx_register_names)];
};

/* Per-VCPU state hung off CPUState::hax_vcpu. */
struct whpx_vcpu {
    WHV_EMULATOR_HANDLE emulator;
    bool window_registered;    /* interrupt-window notification currently armed */
    bool interruptable;        /* false while in an interrupt shadow */
    uint64_t tpr;              /* last TPR value synced with the APIC */
    uint64_t apic_base;
    /* Pending interrupt cached from the last VP exit, replayed on next run */
    WHV_X64_PENDING_INTERRUPTION_REGISTER interrupt_in_flight;

    /* Must be the last field as it may have a tail */
    WHV_RUN_VP_EXIT_CONTEXT exit_ctx;
};

/* Whether the WHPX accelerator was selected and initialized. */
static bool whpx_allowed;

struct whpx_state whpx_global;
169 * VP support
172 static struct whpx_vcpu *get_whpx_vcpu(CPUState *cpu)
174 return (struct whpx_vcpu *)cpu->hax_vcpu;
/*
 * Convert a QEMU segment cache entry to a WHV segment register.
 * @v86: guest is in virtual-8086 mode; @r86: guest is in real mode.
 */
static WHV_X64_SEGMENT_REGISTER whpx_seg_q2h(const SegmentCache *qs, int v86,
                                             int r86)
{
    WHV_X64_SEGMENT_REGISTER hs;
    unsigned flags = qs->flags;

    hs.Base = qs->base;
    hs.Limit = qs->limit;
    hs.Selector = qs->selector;

    if (v86) {
        /* v86 mode: synthesize a present, writable data segment at DPL3 */
        hs.Attributes = 0;
        hs.SegmentType = 3;
        hs.Present = 1;
        hs.DescriptorPrivilegeLevel = 3;
        hs.NonSystemSegment = 1;

    } else {
        /* QEMU keeps the descriptor attribute bits above DESC_TYPE_SHIFT */
        hs.Attributes = (flags >> DESC_TYPE_SHIFT);

        if (r86) {
            /* hs.Base &= 0xfffff; */
        }
    }

    return hs;
}
205 static SegmentCache whpx_seg_h2q(const WHV_X64_SEGMENT_REGISTER *hs)
207 SegmentCache qs;
209 qs.base = hs->Base;
210 qs.limit = hs->Limit;
211 qs.selector = hs->Selector;
213 qs.flags = ((uint32_t)hs->Attributes) << DESC_TYPE_SHIFT;
215 return qs;
/*
 * Push the QEMU CPU state into the hypervisor's virtual processor.
 *
 * Walks whpx_register_names[] in order while filling the parallel value
 * array; the asserts keep idx in lock-step with the table layout.  Must be
 * called on the VCPU thread or while the VCPU is stopped.
 */
static void whpx_set_registers(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_register_set vcxt = {0};
    HRESULT hr;
    int idx = 0;
    int i;
    int v86, r86;

    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));

    v86 = (env->eflags & VM_MASK);        /* virtual-8086 mode */
    r86 = !(env->cr[0] & CR0_PE_MASK);    /* real mode */

    vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state);

    /* Indexes for first 16 registers match between HV and QEMU definitions */
    for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
        vcxt.values[idx].Reg64 = env->regs[idx];
    }

    /* Same goes for RIP and RFLAGS */
    assert(whpx_register_names[idx] == WHvX64RegisterRip);
    vcxt.values[idx++].Reg64 = env->eip;

    assert(whpx_register_names[idx] == WHvX64RegisterRflags);
    vcxt.values[idx++].Reg64 = env->eflags;

    /* Translate 6+4 segment registers. HV and QEMU order matches */
    assert(idx == WHvX64RegisterEs);
    for (i = 0; i < 6; i += 1, idx += 1) {
        vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86);
    }

    assert(idx == WHvX64RegisterLdtr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0);

    assert(idx == WHvX64RegisterTr);
    vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0);

    assert(idx == WHvX64RegisterIdtr);
    vcxt.values[idx].Table.Base = env->idt.base;
    vcxt.values[idx].Table.Limit = env->idt.limit;
    idx += 1;

    assert(idx == WHvX64RegisterGdtr);
    vcxt.values[idx].Table.Base = env->gdt.base;
    vcxt.values[idx].Table.Limit = env->gdt.limit;
    idx += 1;

    /* CR0, 2, 3, 4, 8 */
    assert(whpx_register_names[idx] == WHvX64RegisterCr0);
    vcxt.values[idx++].Reg64 = env->cr[0];
    assert(whpx_register_names[idx] == WHvX64RegisterCr2);
    vcxt.values[idx++].Reg64 = env->cr[2];
    assert(whpx_register_names[idx] == WHvX64RegisterCr3);
    vcxt.values[idx++].Reg64 = env->cr[3];
    assert(whpx_register_names[idx] == WHvX64RegisterCr4);
    vcxt.values[idx++].Reg64 = env->cr[4];
    /* CR8 mirrors the APIC task-priority register */
    assert(whpx_register_names[idx] == WHvX64RegisterCr8);
    vcxt.values[idx++].Reg64 = vcpu->tpr;

    /* 8 Debug Registers - Skipped */

    /* 16 XMM registers */
    assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
    for (i = 0; i < 16; i += 1, idx += 1) {
        vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0);
        vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1);
    }

    /* 8 FP registers */
    assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
    for (i = 0; i < 8; i += 1, idx += 1) {
        vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0);
        /* vcxt.values[idx].Fp.AsUINT128.High64 =
               env->fpregs[i].mmx.MMX_Q(1);
        */
    }

    /* FP control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
    vcxt.values[idx].FpControlStatus.FpControl = env->fpuc;
    /* QEMU stores the FPU top-of-stack separately; fold it into FSW bits 11-13 */
    vcxt.values[idx].FpControlStatus.FpStatus =
        (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    vcxt.values[idx].FpControlStatus.FpTag = 0;
    for (i = 0; i < 8; ++i) {
        /* fptags[] is inverted (1 == empty) relative to the abridged tag word */
        vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i;
    }
    vcxt.values[idx].FpControlStatus.Reserved = 0;
    vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop;
    vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip;
    idx += 1;

    /* XMM control status register */
    assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
    vcxt.values[idx].XmmControlStatus.LastFpRdp = 0;
    vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr;
    vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff;
    idx += 1;

    /* MSRs */
    assert(whpx_register_names[idx] == WHvX64RegisterTsc);
    vcxt.values[idx++].Reg64 = env->tsc;
    assert(whpx_register_names[idx] == WHvX64RegisterEfer);
    vcxt.values[idx++].Reg64 = env->efer;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
    vcxt.values[idx++].Reg64 = env->kernelgsbase;
#endif

    assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
    vcxt.values[idx++].Reg64 = vcpu->apic_base;

    /* WHvX64RegisterPat - Skipped */

    assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
    vcxt.values[idx++].Reg64 = env->sysenter_cs;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
    vcxt.values[idx++].Reg64 = env->sysenter_eip;
    assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
    vcxt.values[idx++].Reg64 = env->sysenter_esp;
    assert(whpx_register_names[idx] == WHvX64RegisterStar);
    vcxt.values[idx++].Reg64 = env->star;
#ifdef TARGET_X86_64
    assert(whpx_register_names[idx] == WHvX64RegisterLstar);
    vcxt.values[idx++].Reg64 = env->lstar;
    assert(whpx_register_names[idx] == WHvX64RegisterCstar);
    vcxt.values[idx++].Reg64 = env->cstar;
    assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
    vcxt.values[idx++].Reg64 = env->fmask;
#endif

    /* Interrupt / Event Registers - Skipped */

    assert(idx == RTL_NUMBER_OF(whpx_register_names));

    hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                         whpx_register_names,
                                         RTL_NUMBER_OF(whpx_register_names),
                                         &vcxt.values[0]);

    if (FAILED(hr)) {
        error_report("WHPX: Failed to set virtual processor context, hr=%08lx",
                     hr);
        __debugbreak();
    }

    return;
}
373 static void whpx_get_registers(CPUState *cpu)
375 struct whpx_state *whpx = &whpx_global;
376 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
377 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
378 X86CPU *x86_cpu = X86_CPU(cpu);
379 struct whpx_register_set vcxt;
380 uint64_t tpr, apic_base;
381 HRESULT hr;
382 int idx = 0;
383 int i;
385 assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
387 hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
388 whpx_register_names,
389 RTL_NUMBER_OF(whpx_register_names),
390 &vcxt.values[0]);
391 if (FAILED(hr)) {
392 error_report("WHPX: Failed to get virtual processor context, hr=%08lx",
393 hr);
394 __debugbreak();
397 /* Indexes for first 16 registers match between HV and QEMU definitions */
398 for (idx = 0; idx < CPU_NB_REGS64; idx += 1) {
399 env->regs[idx] = vcxt.values[idx].Reg64;
402 /* Same goes for RIP and RFLAGS */
403 assert(whpx_register_names[idx] == WHvX64RegisterRip);
404 env->eip = vcxt.values[idx++].Reg64;
405 assert(whpx_register_names[idx] == WHvX64RegisterRflags);
406 env->eflags = vcxt.values[idx++].Reg64;
408 /* Translate 6+4 segment registers. HV and QEMU order matches */
409 assert(idx == WHvX64RegisterEs);
410 for (i = 0; i < 6; i += 1, idx += 1) {
411 env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment);
414 assert(idx == WHvX64RegisterLdtr);
415 env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment);
416 assert(idx == WHvX64RegisterTr);
417 env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment);
418 assert(idx == WHvX64RegisterIdtr);
419 env->idt.base = vcxt.values[idx].Table.Base;
420 env->idt.limit = vcxt.values[idx].Table.Limit;
421 idx += 1;
422 assert(idx == WHvX64RegisterGdtr);
423 env->gdt.base = vcxt.values[idx].Table.Base;
424 env->gdt.limit = vcxt.values[idx].Table.Limit;
425 idx += 1;
427 /* CR0, 2, 3, 4, 8 */
428 assert(whpx_register_names[idx] == WHvX64RegisterCr0);
429 env->cr[0] = vcxt.values[idx++].Reg64;
430 assert(whpx_register_names[idx] == WHvX64RegisterCr2);
431 env->cr[2] = vcxt.values[idx++].Reg64;
432 assert(whpx_register_names[idx] == WHvX64RegisterCr3);
433 env->cr[3] = vcxt.values[idx++].Reg64;
434 assert(whpx_register_names[idx] == WHvX64RegisterCr4);
435 env->cr[4] = vcxt.values[idx++].Reg64;
436 assert(whpx_register_names[idx] == WHvX64RegisterCr8);
437 tpr = vcxt.values[idx++].Reg64;
438 if (tpr != vcpu->tpr) {
439 vcpu->tpr = tpr;
440 cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
443 /* 8 Debug Registers - Skipped */
445 /* 16 XMM registers */
446 assert(whpx_register_names[idx] == WHvX64RegisterXmm0);
447 for (i = 0; i < 16; i += 1, idx += 1) {
448 env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64;
449 env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64;
452 /* 8 FP registers */
453 assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0);
454 for (i = 0; i < 8; i += 1, idx += 1) {
455 env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64;
456 /* env->fpregs[i].mmx.MMX_Q(1) =
457 vcxt.values[idx].Fp.AsUINT128.High64;
461 /* FP control status register */
462 assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus);
463 env->fpuc = vcxt.values[idx].FpControlStatus.FpControl;
464 env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7;
465 env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & ~0x3800;
466 for (i = 0; i < 8; ++i) {
467 env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1);
469 env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp;
470 env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip;
471 idx += 1;
473 /* XMM control status register */
474 assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus);
475 env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl;
476 idx += 1;
478 /* MSRs */
479 assert(whpx_register_names[idx] == WHvX64RegisterTsc);
480 env->tsc = vcxt.values[idx++].Reg64;
481 assert(whpx_register_names[idx] == WHvX64RegisterEfer);
482 env->efer = vcxt.values[idx++].Reg64;
483 #ifdef TARGET_X86_64
484 assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase);
485 env->kernelgsbase = vcxt.values[idx++].Reg64;
486 #endif
488 assert(whpx_register_names[idx] == WHvX64RegisterApicBase);
489 apic_base = vcxt.values[idx++].Reg64;
490 if (apic_base != vcpu->apic_base) {
491 vcpu->apic_base = apic_base;
492 cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base);
495 /* WHvX64RegisterPat - Skipped */
497 assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs);
498 env->sysenter_cs = vcxt.values[idx++].Reg64;;
499 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip);
500 env->sysenter_eip = vcxt.values[idx++].Reg64;
501 assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp);
502 env->sysenter_esp = vcxt.values[idx++].Reg64;
503 assert(whpx_register_names[idx] == WHvX64RegisterStar);
504 env->star = vcxt.values[idx++].Reg64;
505 #ifdef TARGET_X86_64
506 assert(whpx_register_names[idx] == WHvX64RegisterLstar);
507 env->lstar = vcxt.values[idx++].Reg64;
508 assert(whpx_register_names[idx] == WHvX64RegisterCstar);
509 env->cstar = vcxt.values[idx++].Reg64;
510 assert(whpx_register_names[idx] == WHvX64RegisterSfmask);
511 env->fmask = vcxt.values[idx++].Reg64;
512 #endif
514 /* Interrupt / Event Registers - Skipped */
516 assert(idx == RTL_NUMBER_OF(whpx_register_names));
518 return;
/*
 * Emulator callback: perform a guest port-I/O access.  Data is transferred
 * directly to/from IoAccess->Data in the access-info structure.
 */
static HRESULT CALLBACK whpx_emu_ioport_callback(
    void *ctx,
    WHV_EMULATOR_IO_ACCESS_INFO *IoAccess)
{
    MemTxAttrs attrs = { 0 };
    address_space_rw(&address_space_io, IoAccess->Port, attrs,
                     (uint8_t *)&IoAccess->Data, IoAccess->AccessSize,
                     IoAccess->Direction);
    return S_OK;
}
/*
 * Emulator callback: perform a guest-physical memory (MMIO) access on
 * behalf of the instruction emulator.
 */
static HRESULT CALLBACK whpx_emu_memio_callback(
    void *ctx,
    WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
{
    cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
                           ma->Direction);
    return S_OK;
}
541 static HRESULT CALLBACK whpx_emu_getreg_callback(
542 void *ctx,
543 const WHV_REGISTER_NAME *RegisterNames,
544 UINT32 RegisterCount,
545 WHV_REGISTER_VALUE *RegisterValues)
547 HRESULT hr;
548 struct whpx_state *whpx = &whpx_global;
549 CPUState *cpu = (CPUState *)ctx;
551 hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
552 RegisterNames, RegisterCount,
553 RegisterValues);
554 if (FAILED(hr)) {
555 error_report("WHPX: Failed to get virtual processor registers,"
556 " hr=%08lx", hr);
557 __debugbreak();
560 return hr;
563 static HRESULT CALLBACK whpx_emu_setreg_callback(
564 void *ctx,
565 const WHV_REGISTER_NAME *RegisterNames,
566 UINT32 RegisterCount,
567 const WHV_REGISTER_VALUE *RegisterValues)
569 HRESULT hr;
570 struct whpx_state *whpx = &whpx_global;
571 CPUState *cpu = (CPUState *)ctx;
573 hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
574 RegisterNames, RegisterCount,
575 RegisterValues);
576 if (FAILED(hr)) {
577 error_report("WHPX: Failed to set virtual processor registers,"
578 " hr=%08lx", hr);
579 __debugbreak();
583 * The emulator just successfully wrote the register state. We clear the
584 * dirty state so we avoid the double write on resume of the VP.
586 cpu->vcpu_dirty = false;
588 return hr;
/*
 * Emulator callback: translate a guest virtual address to a guest physical
 * address using the hypervisor's page walker.  On success the result code
 * (which may still indicate a failed translation) is passed back through
 * *TranslationResult.
 */
static HRESULT CALLBACK whpx_emu_translate_callback(
    void *ctx,
    WHV_GUEST_VIRTUAL_ADDRESS Gva,
    WHV_TRANSLATE_GVA_FLAGS TranslateFlags,
    WHV_TRANSLATE_GVA_RESULT_CODE *TranslationResult,
    WHV_GUEST_PHYSICAL_ADDRESS *Gpa)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    CPUState *cpu = (CPUState *)ctx;
    WHV_TRANSLATE_GVA_RESULT res;

    hr = WHvTranslateGva(whpx->partition, cpu->cpu_index,
                         Gva, TranslateFlags, &res, Gpa);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to translate GVA, hr=%08lx", hr);
        __debugbreak();
    } else {
        *TranslationResult = res.ResultCode;
    }

    return hr;
}
/* Callback table handed to WHvEmulatorCreateEmulator(); the emulator uses
 * these to access I/O, memory and registers during MMIO/PIO emulation. */
static const WHV_EMULATOR_CALLBACKS whpx_emu_callbacks = {
    .WHvEmulatorIoPortCallback = whpx_emu_ioport_callback,
    .WHvEmulatorMemoryCallback = whpx_emu_memio_callback,
    .WHvEmulatorGetVirtualProcessorRegisters = whpx_emu_getreg_callback,
    .WHvEmulatorSetVirtualProcessorRegisters = whpx_emu_setreg_callback,
    .WHvEmulatorTranslateGvaPage = whpx_emu_translate_callback,
};
/*
 * Handle a memory-access (MMIO) VP exit by running the instruction through
 * the WHv emulator.  Returns 0 on success, -1 on failure.
 */
static int whpx_handle_mmio(CPUState *cpu, WHV_MEMORY_ACCESS_CONTEXT *ctx)
{
    HRESULT hr;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    WHV_EMULATOR_STATUS emu_status;

    hr = WHvEmulatorTryMmioEmulation(vcpu->emulator, cpu, ctx, &emu_status);
    if (FAILED(hr)) {
        __debugbreak();
        error_report("WHPX: Failed to parse MMIO access, hr=%08lx", hr);
        return -1;
    }

    /* The API call succeeded but the instruction itself was not emulated */
    if (!emu_status.EmulationSuccessful) {
        __debugbreak();
        error_report("WHPX: Failed to emulate MMIO access");
        return -1;
    }

    return 0;
}
645 static int whpx_handle_portio(CPUState *cpu,
646 WHV_X64_IO_PORT_ACCESS_CONTEXT *ctx)
648 HRESULT hr;
649 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
650 WHV_EMULATOR_STATUS emu_status;
652 hr = WHvEmulatorTryIoEmulation(vcpu->emulator, cpu, ctx, &emu_status);
653 if (FAILED(hr)) {
654 __debugbreak();
655 error_report("WHPX: Failed to parse PortIO access, hr=%08lx", hr);
656 return -1;
659 if (!emu_status.EmulationSuccessful) {
660 __debugbreak();
661 error_report("WHPX: Failed to emulate PortMMIO access");
662 return -1;
665 return 0;
/*
 * Handle a HLT VP exit.  If no wake-up event (maskable interrupt with IF
 * set, or NMI) is pending, put the VCPU to sleep and return 1 so the run
 * loop exits; otherwise return 0 and keep running.
 */
static int whpx_handle_halt(CPUState *cpu)
{
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    int ret = 0;

    /* interrupt_request and halted are protected by the iothread lock */
    qemu_mutex_lock_iothread();
    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) &&
        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->exception_index = EXCP_HLT;
        cpu->halted = true;
        ret = 1;
    }
    qemu_mutex_unlock_iothread();

    return ret;
}
686 static void whpx_vcpu_pre_run(CPUState *cpu)
688 HRESULT hr;
689 struct whpx_state *whpx = &whpx_global;
690 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
691 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
692 X86CPU *x86_cpu = X86_CPU(cpu);
693 int irq;
694 WHV_X64_PENDING_INTERRUPTION_REGISTER new_int = {0};
695 UINT32 reg_count = 0;
696 WHV_REGISTER_VALUE reg_values[3] = {0};
697 WHV_REGISTER_NAME reg_names[3];
699 qemu_mutex_lock_iothread();
701 /* Inject NMI */
702 if (!vcpu->interrupt_in_flight.InterruptionPending &&
703 cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
704 if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
705 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
706 vcpu->interruptable = false;
707 new_int.InterruptionType = WHvX64PendingNmi;
708 new_int.InterruptionPending = 1;
709 new_int.InterruptionVector = 2;
711 if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
712 qemu_mutex_lock_iothread();
713 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
714 __debugbreak();
715 qemu_mutex_unlock_iothread();
720 * Force the VCPU out of its inner loop to process any INIT requests or
721 * commit pending TPR access.
723 if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
724 if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
725 !(env->hflags & HF_SMM_MASK)) {
726 cpu->exit_request = 1;
728 if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
729 cpu->exit_request = 1;
733 /* Get pending hard interruption or replay one that was overwritten */
734 if (!vcpu->interrupt_in_flight.InterruptionPending &&
735 vcpu->interruptable && (env->eflags & IF_MASK)) {
736 assert(!new_int.InterruptionPending);
737 if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
738 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
739 irq = cpu_get_pic_interrupt(env);
740 if (irq >= 0) {
741 new_int.InterruptionType = WHvX64PendingInterrupt;
742 new_int.InterruptionPending = 1;
743 new_int.InterruptionVector = irq;
748 /* Setup interrupt state if new one was prepared */
749 if (new_int.InterruptionPending) {
750 reg_values[reg_count].PendingInterruption = new_int;
751 reg_names[reg_count] = WHvRegisterPendingInterruption;
752 reg_count += 1;
755 /* Sync the TPR to the CR8 if was modified during the intercept */
756 reg_values[reg_count].Reg64 = cpu_get_apic_tpr(x86_cpu->apic_state);
757 if (reg_values[reg_count].Reg64 != vcpu->tpr) {
758 vcpu->tpr = reg_values[reg_count].Reg64;
759 cpu->exit_request = 1;
760 reg_names[reg_count] = WHvX64RegisterCr8;
761 reg_count += 1;
764 /* Update the state of the interrupt delivery notification */
765 if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
766 reg_values[reg_count].DeliverabilityNotifications.InterruptNotification
767 = 1;
768 if (vcpu->window_registered != 1) {
769 vcpu->window_registered = 1;
771 reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;
772 reg_count += 1;
775 qemu_mutex_unlock_iothread();
777 if (reg_count) {
778 hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
779 reg_names, reg_count, reg_values);
780 if (FAILED(hr)) {
781 error_report("WHPX: Failed to set interrupt state registers,"
782 " hr=%08lx", hr);
783 __debugbreak();
787 return;
790 static void whpx_vcpu_post_run(CPUState *cpu)
792 HRESULT hr;
793 struct whpx_state *whpx = &whpx_global;
794 struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
795 struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
796 X86CPU *x86_cpu = X86_CPU(cpu);
797 WHV_REGISTER_VALUE reg_values[4];
798 const WHV_REGISTER_NAME reg_names[4] = {
799 WHvX64RegisterRflags,
800 WHvX64RegisterCr8,
801 WHvRegisterPendingInterruption,
802 WHvRegisterInterruptState,
805 hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
806 reg_names, 4, reg_values);
807 if (FAILED(hr)) {
808 error_report("WHPX: Failed to get interrupt state regusters,"
809 " hr=%08lx", hr);
810 __debugbreak();
811 vcpu->interruptable = false;
812 return;
815 assert(reg_names[0] == WHvX64RegisterRflags);
816 env->eflags = reg_values[0].Reg64;
818 assert(reg_names[1] == WHvX64RegisterCr8);
819 if (vcpu->tpr != reg_values[1].Reg64) {
820 vcpu->tpr = reg_values[1].Reg64;
821 qemu_mutex_lock_iothread();
822 cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
823 qemu_mutex_unlock_iothread();
826 assert(reg_names[2] == WHvRegisterPendingInterruption);
827 vcpu->interrupt_in_flight = reg_values[2].PendingInterruption;
829 assert(reg_names[3] == WHvRegisterInterruptState);
830 vcpu->interruptable = !reg_values[3].InterruptState.InterruptShadow;
832 return;
/*
 * Process asynchronous CPU events (INIT, SIPI, poll, TPR reporting) before
 * entering the run loop.  Called with the iothread lock held.
 */
static void whpx_vcpu_process_async_events(CPUState *cpu)
{
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    /* INIT outside of SMM resets the CPU; the new state must be pushed */
    if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {

        do_cpu_init(x86_cpu);
        cpu->vcpu_dirty = true;
        vcpu->interruptable = true;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    }

    /* Wake a halted CPU when a deliverable interrupt or NMI is pending */
    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->halted = false;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        }
        do_cpu_sipi(x86_cpu);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        }
        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return;
}
/*
 * Main VCPU execution loop: push dirty state, run the virtual processor,
 * and dispatch VP exits until a handler reports a stopping condition.
 * Returns non-zero only on fatal error (ret < 0 from a handler).
 * Called with the iothread lock held; drops it around guest execution.
 */
static int whpx_vcpu_run(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    int ret;

    whpx_vcpu_process_async_events(cpu);
    if (cpu->halted) {
        cpu->exception_index = EXCP_HLT;
        atomic_set(&cpu->exit_request, false);
        return 0;
    }

    qemu_mutex_unlock_iothread();
    cpu_exec_start(cpu);

    do {
        if (cpu->vcpu_dirty) {
            whpx_set_registers(cpu);
            cpu->vcpu_dirty = false;
        }

        whpx_vcpu_pre_run(cpu);

        /* An exit was requested while we were preparing: cancel the run */
        if (atomic_read(&cpu->exit_request)) {
            whpx_vcpu_kick(cpu);
        }

        /* Re-enter immediately on Alerted exits caused by a stale cancel */
        for (;;) {
            hr = WHvRunVirtualProcessor(whpx->partition, cpu->cpu_index,
                                        &vcpu->exit_ctx, whpx->exit_ctx_size);

            if (SUCCEEDED(hr) && (vcpu->exit_ctx.ExitReason ==
                                  WHvRunVpExitReasonAlerted)) {
                WHvCancelRunVirtualProcessor(whpx->partition, cpu->cpu_index,
                                             0);
            } else {
                break;
            }
        }

        if (FAILED(hr)) {
            error_report("WHPX: Failed to exec a virtual processor,"
                         " hr=%08lx", hr);
            ret = -1;
            break;
        }

        whpx_vcpu_post_run(cpu);

        /*
         * NOTE(review): the InterruptWindow and default cases below do not
         * assign 'ret' before the 'while (!ret)' test — looks like 'ret'
         * can be read uninitialized on the first iteration; confirm against
         * the full file whether a 'ret = 0;' was intended here.
         */
        switch (vcpu->exit_ctx.ExitReason) {
        case WHvRunVpExitReasonMemoryAccess:
            ret = whpx_handle_mmio(cpu, &vcpu->exit_ctx.MemoryAccess);
            break;

        case WHvRunVpExitReasonX64IoPortAccess:
            ret = whpx_handle_portio(cpu, &vcpu->exit_ctx.IoPortAccess);
            break;

        case WHvRunVpExitReasonX64InterruptWindow:
            vcpu->window_registered = 0;
            break;

        case WHvRunVpExitReasonX64Halt:
            ret = whpx_handle_halt(cpu);
            break;

        case WHvRunVpExitReasonCanceled:
            cpu->exception_index = EXCP_INTERRUPT;
            ret = 1;
            break;

        case WHvRunVpExitReasonNone:
        case WHvRunVpExitReasonUnrecoverableException:
        case WHvRunVpExitReasonInvalidVpRegisterValue:
        case WHvRunVpExitReasonUnsupportedFeature:
        case WHvRunVpExitReasonX64MsrAccess:
        case WHvRunVpExitReasonX64Cpuid:
        case WHvRunVpExitReasonException:
        case WHvRunVpExitReasonAlerted:
        default:
            error_report("WHPX: Unexpected VP exit code %d",
                         vcpu->exit_ctx.ExitReason);
            whpx_get_registers(cpu);
            qemu_mutex_lock_iothread();
            qemu_system_guest_panicked(cpu_get_crash_info(cpu));
            qemu_mutex_unlock_iothread();
            break;
        }

    } while (!ret);

    cpu_exec_end(cpu);
    qemu_mutex_lock_iothread();
    current_cpu = cpu;

    atomic_set(&cpu->exit_request, false);

    /* Only a negative handler result is fatal to the caller */
    return ret < 0;
}
/* run_on_cpu worker: pull state from the VP and mark it dirty in QEMU. */
static void do_whpx_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    whpx_get_registers(cpu);
    cpu->vcpu_dirty = true;
}

/* run_on_cpu worker: push QEMU state to the VP after a reset. */
static void do_whpx_cpu_synchronize_post_reset(CPUState *cpu,
                                               run_on_cpu_data arg)
{
    whpx_set_registers(cpu);
    cpu->vcpu_dirty = false;
}

/* run_on_cpu worker: push QEMU state to the VP after machine init. */
static void do_whpx_cpu_synchronize_post_init(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    whpx_set_registers(cpu);
    cpu->vcpu_dirty = false;
}

/* run_on_cpu worker: mark state dirty so loadvm data gets pushed later. */
static void do_whpx_cpu_synchronize_pre_loadvm(CPUState *cpu,
                                               run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}
1008 * CPU support.
/* Ensure QEMU's view of the CPU state is current (no-op if already dirty). */
void whpx_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_whpx_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

/* Push QEMU state to the hypervisor after a system reset. */
void whpx_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

/* Push QEMU state to the hypervisor after machine creation. */
void whpx_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

/* Mark state dirty before loading a VM snapshot. */
void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_whpx_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}
1034 * Vcpu support.
1037 static Error *whpx_migration_blocker;
1039 int whpx_init_vcpu(CPUState *cpu)
1041 HRESULT hr;
1042 struct whpx_state *whpx = &whpx_global;
1043 struct whpx_vcpu *vcpu;
1044 Error *local_error = NULL;
1046 /* Add migration blockers for all unsupported features of the
1047 * Windows Hypervisor Platform
1049 if (whpx_migration_blocker == NULL) {
1050 error_setg(&whpx_migration_blocker,
1051 "State blocked due to non-migratable CPUID feature support,"
1052 "dirty memory tracking support, and XSAVE/XRSTOR support");
1054 (void)migrate_add_blocker(whpx_migration_blocker, &local_error);
1055 if (local_error) {
1056 error_report_err(local_error);
1057 error_free(whpx_migration_blocker);
1058 migrate_del_blocker(whpx_migration_blocker);
1059 return -EINVAL;
1063 vcpu = g_malloc0(FIELD_OFFSET(struct whpx_vcpu, exit_ctx) +
1064 whpx->exit_ctx_size);
1066 if (!vcpu) {
1067 error_report("WHPX: Failed to allocte VCPU context.");
1068 return -ENOMEM;
1071 hr = WHvEmulatorCreateEmulator(whpx_emu_callbacks, &vcpu->emulator);
1072 if (FAILED(hr)) {
1073 error_report("WHPX: Failed to setup instruction completion support,"
1074 " hr=%08lx", hr);
1075 g_free(vcpu);
1076 return -EINVAL;
1079 hr = WHvCreateVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
1080 if (FAILED(hr)) {
1081 error_report("WHPX: Failed to create a virtual processor,"
1082 " hr=%08lx", hr);
1083 WHvEmulatorDestroyEmulator(vcpu->emulator);
1084 g_free(vcpu);
1085 return -EINVAL;
1088 vcpu->interruptable = true;
1090 cpu->vcpu_dirty = true;
1091 cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
1093 return 0;
/*
 * Outer execution loop for a VCPU thread: run until an exception index of
 * EXCP_INTERRUPT or above is raised, then hand it back to the caller.
 * Aborts the process on a fatal run failure.
 */
int whpx_vcpu_exec(CPUState *cpu)
{
    int ret;
    int fatal;

    for (;;) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            ret = cpu->exception_index;
            cpu->exception_index = -1;
            break;
        }

        fatal = whpx_vcpu_run(cpu);

        if (fatal) {
            error_report("WHPX: Failed to exec a virtual processor");
            abort();
        }
    }

    return ret;
}
/* Tear down the hypervisor VP, its emulator, and the per-vCPU state. */
void whpx_destroy_vcpu(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    WHvDeleteVirtualProcessor(whpx->partition, cpu->cpu_index);
    WHvEmulatorDestroyEmulator(vcpu->emulator);
    g_free(cpu->hax_vcpu);
    return;
}
/* Force the VP out of guest mode so the run loop can observe exit_request. */
void whpx_vcpu_kick(CPUState *cpu)
{
    struct whpx_state *whpx = &whpx_global;
    WHvCancelRunVirtualProcessor(whpx->partition, cpu->cpu_index, 0);
}
1137 * Memory support.
/*
 * Map (@add != 0) or unmap a host virtual-address range into the guest
 * physical address space.  ROM ranges are mapped without write permission.
 */
static void whpx_update_mapping(hwaddr start_pa, ram_addr_t size,
                                void *host_va, int add, int rom,
                                const char *name)
{
    struct whpx_state *whpx = &whpx_global;
    HRESULT hr;

    /*
    if (add) {
        printf("WHPX: ADD PA:%p Size:%p, Host:%p, %s, '%s'\n",
               (void*)start_pa, (void*)size, host_va,
               (rom ? "ROM" : "RAM"), name);
    } else {
        printf("WHPX: DEL PA:%p Size:%p, Host:%p, '%s'\n",
               (void*)start_pa, (void*)size, host_va, name);
    }
    */

    if (add) {
        hr = WHvMapGpaRange(whpx->partition,
                            host_va,
                            start_pa,
                            size,
                            (WHvMapGpaRangeFlagRead |
                             WHvMapGpaRangeFlagExecute |
                             (rom ? 0 : WHvMapGpaRangeFlagWrite)));
    } else {
        hr = WHvUnmapGpaRange(whpx->partition,
                              start_pa,
                              size);
    }

    if (FAILED(hr)) {
        error_report("WHPX: Failed to %s GPA range '%s' PA:%p, Size:%p bytes,"
                     " Host:%p, hr=%08lx",
                     (add ? "MAP" : "UNMAP"), name,
                     (void *)start_pa, (void *)size, host_va, hr);
    }
}
/*
 * Translate a memory listener section into a WHPX mapping update.
 *
 * Only RAM-backed regions are mapped into the partition; everything
 * else is skipped (serviced through emulation instead).  The range is
 * trimmed to host-page alignment before being passed on, and dropped
 * entirely if nothing page-aligned remains.
 */
static void whpx_process_section(MemoryRegionSection *section, int add)
{
    MemoryRegion *mr = section->mr;
    hwaddr start_pa = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    unsigned int delta;
    uint64_t host_va;

    if (!memory_region_is_ram(mr)) {
        return;
    }

    /*
     * delta = bytes from start_pa up to the next host page boundary
     * (0 when start_pa is already page-aligned, thanks to the mask).
     */
    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
    delta &= ~qemu_real_host_page_mask;
    if (delta > size) {
        return;
    }
    start_pa += delta;
    size -= delta;
    /* Drop any trailing partial page as well. */
    size &= qemu_real_host_page_mask;
    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
        return;
    }

    /* Advance the host pointer by the same leading trim. */
    host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
            + section->offset_within_region + delta;

    whpx_update_mapping(start_pa, size, (void *)host_va, add,
                        memory_region_is_rom(mr), mr->name);
}
/*
 * MemoryListener callback: a section appeared in the address space.
 * Take a reference on the region (released in whpx_region_del) before
 * mapping it into the partition.
 */
static void whpx_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    whpx_process_section(section, 1);
}
/*
 * MemoryListener callback: a section left the address space.
 * Unmap it from the partition, then drop the reference taken when the
 * section was added.
 */
static void whpx_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    whpx_process_section(section, 0);
    memory_region_unref(section->mr);
}
/* MemoryListener callback: start of a topology transaction (no-op). */
static void whpx_transaction_begin(MemoryListener *listener)
{
}
/* MemoryListener callback: end of a topology transaction (no-op). */
static void whpx_transaction_commit(MemoryListener *listener)
{
}
/*
 * MemoryListener callback for dirty-log sync.  WHPX dirty tracking is
 * not wired up here, so conservatively mark the whole RAM section dirty.
 */
static void whpx_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (!memory_region_is_ram(mr)) {
        return;
    }

    memory_region_set_dirty(mr, 0, int128_get64(section->size));
}
/* Listener that mirrors QEMU's memory topology into WHPX GPA mappings. */
static MemoryListener whpx_memory_listener = {
    .begin = whpx_transaction_begin,
    .commit = whpx_transaction_commit,
    .region_add = whpx_region_add,
    .region_del = whpx_region_del,
    .log_sync = whpx_log_sync,
    .priority = 10,
};
/* Register the WHPX memory listener on the system address space. */
static void whpx_memory_init(void)
{
    memory_listener_register(&whpx_memory_listener, &address_space_memory);
}
/*
 * cpu_interrupt_handler hook: record the pending interrupt mask and,
 * when raised from another thread, kick the vCPU so it notices it.
 */
static void whpx_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
1269 * Partition support
1272 static int whpx_accel_init(MachineState *ms)
1274 struct whpx_state *whpx;
1275 int ret;
1276 HRESULT hr;
1277 WHV_CAPABILITY whpx_cap;
1278 WHV_PARTITION_PROPERTY prop;
1280 whpx = &whpx_global;
1282 memset(whpx, 0, sizeof(struct whpx_state));
1283 whpx->mem_quota = ms->ram_size;
1285 hr = WHvGetCapability(WHvCapabilityCodeHypervisorPresent, &whpx_cap,
1286 sizeof(whpx_cap));
1287 if (FAILED(hr) || !whpx_cap.HypervisorPresent) {
1288 error_report("WHPX: No accelerator found, hr=%08lx", hr);
1289 ret = -ENOSPC;
1290 goto error;
1293 hr = WHvCreatePartition(&whpx->partition);
1294 if (FAILED(hr)) {
1295 error_report("WHPX: Failed to create partition, hr=%08lx", hr);
1296 ret = -EINVAL;
1297 goto error;
1300 memset(&prop, 0, sizeof(WHV_PARTITION_PROPERTY));
1301 prop.PropertyCode = WHvPartitionPropertyCodeProcessorCount;
1302 prop.ProcessorCount = smp_cpus;
1303 hr = WHvSetPartitionProperty(whpx->partition,
1304 &prop,
1305 sizeof(WHV_PARTITION_PROPERTY));
1307 if (FAILED(hr)) {
1308 error_report("WHPX: Failed to set partition core count to %d,"
1309 " hr=%08lx", smp_cores, hr);
1310 ret = -EINVAL;
1311 goto error;
1314 hr = WHvSetupPartition(whpx->partition);
1315 if (FAILED(hr)) {
1316 error_report("WHPX: Failed to setup partition, hr=%08lx", hr);
1317 ret = -EINVAL;
1318 goto error;
1321 whpx->exit_ctx_size = WHvGetRunExitContextSize();
1322 assert(whpx->exit_ctx_size);
1324 whpx_memory_init();
1326 cpu_interrupt_handler = whpx_handle_interrupt;
1328 printf("Windows Hypervisor Platform accelerator is operational\n");
1329 return 0;
1331 error:
1333 if (NULL != whpx->partition) {
1334 WHvDeletePartition(whpx->partition);
1335 whpx->partition = NULL;
1339 return ret;
/* Returns non-zero when the WHPX accelerator was selected and enabled. */
int whpx_enabled(void)
{
    return whpx_allowed;
}
/* QOM class init: hook WHPX into QEMU's accelerator framework. */
static void whpx_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "WHPX";
    ac->init_machine = whpx_accel_init;
    ac->allowed = &whpx_allowed;
}
/* QOM type descriptor for the WHPX accelerator. */
static const TypeInfo whpx_accel_type = {
    .name = ACCEL_CLASS_NAME("whpx"),
    .parent = TYPE_ACCEL,
    .class_init = whpx_accel_class_init,
};
/* Register the accelerator type with QOM at program startup. */
static void whpx_type_init(void)
{
    type_register_static(&whpx_accel_type);
}

type_init(whpx_type_init);