spapr: nested: Introduce H_GUEST_CREATE_VCPU hcall.
hw/ppc/spapr_nested.c
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_nested.h"
#include "mmu-book3s-v3.h"
#include "cpu-models.h"
#include "qemu/log.h"
void spapr_nested_reset(SpaprMachineState *spapr)
{
    if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) {
        spapr->nested.api = NESTED_API_KVM_HV;
        spapr_unregister_nested_hv();
        spapr_register_nested_hv();
    } else {
        spapr->nested.api = 0;
        spapr->nested.capabilities_set = false;
    }
}

uint8_t spapr_nested_api(SpaprMachineState *spapr)
{
    return spapr->nested.api;
}
#ifdef CONFIG_TCG

bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
                              target_ulong lpid, ppc_v3_pate_t *entry)
{
    uint64_t patb, pats;

    assert(lpid != 0);

    patb = spapr->nested.ptcr & PTCR_PATB;
    pats = spapr->nested.ptcr & PTCR_PATS;

    /* Check if partition table is properly aligned */
    if (patb & MAKE_64BIT_MASK(0, pats + 12)) {
        return false;
    }

    /* Calculate number of entries */
    pats = 1ull << (pats + 12 - 4);
    if (pats <= lpid) {
        return false;
    }

    /* Grab entry */
    patb += 16 * lpid;
    entry->dw0 = ldq_phys(CPU(cpu)->as, patb);
    entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8);
    return true;
}
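/*
 * Worked example (a sketch, not part of the code above): with PATS = 4 the
 * partition table spans 2^(4+12) = 64KiB, i.e. 4096 16-byte entries, so any
 * lpid >= 4096 fails the bounds check in spapr_get_pate_nested_hv(), and the
 * table base must be aligned to the table size (the MAKE_64BIT_MASK check).
 */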
#define PRTS_MASK 0x1f

static target_ulong h_set_ptbl(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    target_ulong ptcr = args[0];

    if (!spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) {
        return H_FUNCTION;
    }

    if ((ptcr & PRTS_MASK) + 12 - 4 > 12) {
        return H_PARAMETER;
    }

    spapr->nested.ptcr = ptcr; /* Save new partition table */

    return H_SUCCESS;
}
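/*
 * Caller-side sketch (hypothetical helper name, for illustration only): an
 * L1 KVM would register its partition table with something like
 *
 *     rc = hcall(KVMPPC_H_SET_PARTITION_TABLE, table_base | prts);
 *
 * where table_base is aligned to the table size and prts <= 4, since the
 * size check above rejects anything larger than 2^(4+12) bytes, i.e. more
 * than 4096 partition table entries.
 */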
static target_ulong h_tlb_invalidate(PowerPCCPU *cpu,
                                     SpaprMachineState *spapr,
                                     target_ulong opcode,
                                     target_ulong *args)
{
    /*
     * The spapr virtual hypervisor nested HV implementation retains no L2
     * translation state except the TLB, and the TLB is always invalidated
     * across L1<->L2 transitions, so nothing is required here.
     */

    return H_SUCCESS;
}
static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    /*
     * This hcall is not required; L1 KVM will take a slow path and walk the
     * page tables manually to do the data copy.
     */
    return H_FUNCTION;
}
static void nested_save_state(struct nested_ppc_state *save, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    memcpy(save->gpr, env->gpr, sizeof(save->gpr));

    save->lr = env->lr;
    save->ctr = env->ctr;
    save->cfar = env->cfar;
    save->msr = env->msr;
    save->nip = env->nip;

    save->cr = ppc_get_cr(env);
    save->xer = cpu_read_xer(env);

    save->lpcr = env->spr[SPR_LPCR];
    save->lpidr = env->spr[SPR_LPIDR];
    save->pcr = env->spr[SPR_PCR];
    save->dpdes = env->spr[SPR_DPDES];
    save->hfscr = env->spr[SPR_HFSCR];
    save->srr0 = env->spr[SPR_SRR0];
    save->srr1 = env->spr[SPR_SRR1];
    save->sprg0 = env->spr[SPR_SPRG0];
    save->sprg1 = env->spr[SPR_SPRG1];
    save->sprg2 = env->spr[SPR_SPRG2];
    save->sprg3 = env->spr[SPR_SPRG3];
    save->pidr = env->spr[SPR_BOOKS_PID];
    save->ppr = env->spr[SPR_PPR];

    save->tb_offset = env->tb_env->tb_offset;
}
static void nested_load_state(PowerPCCPU *cpu, struct nested_ppc_state *load)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    memcpy(env->gpr, load->gpr, sizeof(env->gpr));

    env->lr = load->lr;
    env->ctr = load->ctr;
    env->cfar = load->cfar;
    env->msr = load->msr;
    env->nip = load->nip;

    ppc_set_cr(env, load->cr);
    cpu_write_xer(env, load->xer);

    env->spr[SPR_LPCR] = load->lpcr;
    env->spr[SPR_LPIDR] = load->lpidr;
    env->spr[SPR_PCR] = load->pcr;
    env->spr[SPR_DPDES] = load->dpdes;
    env->spr[SPR_HFSCR] = load->hfscr;
    env->spr[SPR_SRR0] = load->srr0;
    env->spr[SPR_SRR1] = load->srr1;
    env->spr[SPR_SPRG0] = load->sprg0;
    env->spr[SPR_SPRG1] = load->sprg1;
    env->spr[SPR_SPRG2] = load->sprg2;
    env->spr[SPR_SPRG3] = load->sprg3;
    env->spr[SPR_BOOKS_PID] = load->pidr;
    env->spr[SPR_PPR] = load->ppr;

    env->tb_env->tb_offset = load->tb_offset;

    /*
     * MSR updated, compute hflags and possible interrupts.
     */
    hreg_compute_hflags(env);
    ppc_maybe_interrupt(env);

    /*
     * Nested HV does not tag TLB entries between L1 and L2, so must
     * flush on transition.
     */
    tlb_flush(cs);
    env->reserve_addr = -1; /* Reset the reservation */
}
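/*
 * Note: nested_save_state() and nested_load_state() must remain symmetric.
 * Any field added to struct nested_ppc_state needs both a save and a load,
 * otherwise register state would leak between L1 and L2 across transitions.
 */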
/*
 * When this handler returns, the environment is switched to the L2 guest
 * and TCG begins running that. spapr_exit_nested() performs the switch from
 * L2 back to L1 and returns from the H_ENTER_NESTED hcall.
 */
static target_ulong h_enter_nested(PowerPCCPU *cpu,
                                   SpaprMachineState *spapr,
                                   target_ulong opcode,
                                   target_ulong *args)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    CPUPPCState *env = &cpu->env;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct nested_ppc_state l2_state;
    target_ulong hv_ptr = args[0];
    target_ulong regs_ptr = args[1];
    target_ulong hdec, now = cpu_ppc_load_tbl(env);
    target_ulong lpcr, lpcr_mask;
    struct kvmppc_hv_guest_state *hvstate;
    struct kvmppc_hv_guest_state hv_state;
    struct kvmppc_pt_regs *regs;
    hwaddr len;

    if (spapr->nested.ptcr == 0) {
        return H_NOT_AVAILABLE;
    }

    len = sizeof(*hvstate);
    hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, false,
                                MEMTXATTRS_UNSPECIFIED);
    if (len != sizeof(*hvstate)) {
        address_space_unmap(CPU(cpu)->as, hvstate, len, 0, false);
        return H_PARAMETER;
    }

    memcpy(&hv_state, hvstate, len);

    address_space_unmap(CPU(cpu)->as, hvstate, len, len, false);

    /*
     * We accept versions 1 and 2. Version 2 fields are unused because TCG
     * does not implement DAWR*.
     */
    if (hv_state.version > HV_GUEST_STATE_VERSION) {
        return H_PARAMETER;
    }

    if (hv_state.lpid == 0) {
        return H_PARAMETER;
    }

    spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1);
    if (!spapr_cpu->nested_host_state) {
        return H_NO_MEM;
    }

    assert(env->spr[SPR_LPIDR] == 0);
    assert(env->spr[SPR_DPDES] == 0);
    nested_save_state(spapr_cpu->nested_host_state, cpu);

    len = sizeof(*regs);
    regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, false,
                             MEMTXATTRS_UNSPECIFIED);
    if (!regs || len != sizeof(*regs)) {
        address_space_unmap(CPU(cpu)->as, regs, len, 0, false);
        g_free(spapr_cpu->nested_host_state);
        return H_P2;
    }

    len = sizeof(l2_state.gpr);
    assert(len == sizeof(regs->gpr));
    memcpy(l2_state.gpr, regs->gpr, len);

    l2_state.lr = regs->link;
    l2_state.ctr = regs->ctr;
    l2_state.xer = regs->xer;
    l2_state.cr = regs->ccr;
    l2_state.msr = regs->msr;
    l2_state.nip = regs->nip;

    address_space_unmap(CPU(cpu)->as, regs, len, len, false);

    l2_state.cfar = hv_state.cfar;
    l2_state.lpidr = hv_state.lpid;

    lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER;
    lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | (hv_state.lpcr & lpcr_mask);
    lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE;
    lpcr &= ~LPCR_LPES0;
    l2_state.lpcr = lpcr & pcc->lpcr_mask;

    l2_state.pcr = hv_state.pcr;
    /* hv_state.amor is not used */
    l2_state.dpdes = hv_state.dpdes;
    l2_state.hfscr = hv_state.hfscr;
    /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs */
    l2_state.srr0 = hv_state.srr0;
    l2_state.srr1 = hv_state.srr1;
    l2_state.sprg0 = hv_state.sprg[0];
    l2_state.sprg1 = hv_state.sprg[1];
    l2_state.sprg2 = hv_state.sprg[2];
    l2_state.sprg3 = hv_state.sprg[3];
    l2_state.pidr = hv_state.pidr;
    l2_state.ppr = hv_state.ppr;
    l2_state.tb_offset = env->tb_env->tb_offset + hv_state.tb_offset;

    /*
     * Switch to the nested guest environment and start the "hdec" timer.
     */
    nested_load_state(cpu, &l2_state);

    hdec = hv_state.hdec_expiry - now;
    cpu_ppc_hdecr_init(env);
    cpu_ppc_store_hdecr(env, hdec);

    /*
     * The hv_state.vcpu_token is not needed. It is used by the KVM
     * implementation to remember which L2 vCPU last ran on which physical
     * CPU so as to invalidate process scope translations if it is moved
     * between physical CPUs. For now TLBs are always flushed on L1<->L2
     * transitions so this is not a problem.
     *
     * Could validate that the same vcpu_token does not attempt to run on
     * different L1 vCPUs at the same time, but that would be a L1 KVM bug
     * and it's not obviously worth a new data structure to do it.
     */

    spapr_cpu->in_nested = true;

    /*
     * The spapr hcall helper sets env->gpr[3] to the return value, but at
     * this point the L1 is not returning from the hcall; rather we start
     * running the L2, so r3 must not be clobbered. Return env->gpr[3] to
     * leave it unchanged.
     */
    return env->gpr[3];
}
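/*
 * A minimal L1-side sketch of the calling convention (hypothetical helper
 * names, for illustration only):
 *
 *     struct kvmppc_hv_guest_state hv = {
 *         .version = HV_GUEST_STATE_VERSION,
 *         .lpid = l2_lpid,                    // must be non-zero
 *         .hdec_expiry = now + timeslice,     // in timebase units
 *         ...
 *     };
 *     struct kvmppc_pt_regs regs = { ... };   // L2 GPRs, nip, msr, ccr
 *     r3 = hcall(KVMPPC_H_ENTER_NESTED, &hv, &regs);
 *
 * When the L2 exits, spapr_exit_nested_hv() writes the exit state back
 * through these same pointers and r3 holds the L2 interrupt vector.
 */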
static void spapr_exit_nested_hv(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct nested_ppc_state l2_state;
    target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4];
    target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5];
    target_ulong hsrr0, hsrr1, hdar, asdr, hdsisr;
    struct kvmppc_hv_guest_state *hvstate;
    struct kvmppc_pt_regs *regs;
    hwaddr len;

    nested_save_state(&l2_state, cpu);
    hsrr0 = env->spr[SPR_HSRR0];
    hsrr1 = env->spr[SPR_HSRR1];
    hdar = env->spr[SPR_HDAR];
    hdsisr = env->spr[SPR_HDSISR];
    asdr = env->spr[SPR_ASDR];

    /*
     * Switch back to the host environment (including for any error).
     */
    assert(env->spr[SPR_LPIDR] != 0);
    nested_load_state(cpu, spapr_cpu->nested_host_state);
    env->gpr[3] = env->excp_vectors[excp]; /* hcall return value */

    cpu_ppc_hdecr_exit(env);

    spapr_cpu->in_nested = false;

    g_free(spapr_cpu->nested_host_state);
    spapr_cpu->nested_host_state = NULL;

    len = sizeof(*hvstate);
    hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true,
                                MEMTXATTRS_UNSPECIFIED);
    if (len != sizeof(*hvstate)) {
        address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true);
        env->gpr[3] = H_PARAMETER;
        return;
    }

    hvstate->cfar = l2_state.cfar;
    hvstate->lpcr = l2_state.lpcr;
    hvstate->pcr = l2_state.pcr;
    hvstate->dpdes = l2_state.dpdes;
    hvstate->hfscr = l2_state.hfscr;

    if (excp == POWERPC_EXCP_HDSI) {
        hvstate->hdar = hdar;
        hvstate->hdsisr = hdsisr;
        hvstate->asdr = asdr;
    } else if (excp == POWERPC_EXCP_HISI) {
        hvstate->asdr = asdr;
    }

    /* HEIR should be implemented for HV mode and saved here. */
    hvstate->srr0 = l2_state.srr0;
    hvstate->srr1 = l2_state.srr1;
    hvstate->sprg[0] = l2_state.sprg0;
    hvstate->sprg[1] = l2_state.sprg1;
    hvstate->sprg[2] = l2_state.sprg2;
    hvstate->sprg[3] = l2_state.sprg3;
    hvstate->pidr = l2_state.pidr;
    hvstate->ppr = l2_state.ppr;

    /* Is it okay to specify write length larger than actual data written? */
    address_space_unmap(CPU(cpu)->as, hvstate, len, len, true);

    len = sizeof(*regs);
    regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true,
                             MEMTXATTRS_UNSPECIFIED);
    if (!regs || len != sizeof(*regs)) {
        address_space_unmap(CPU(cpu)->as, regs, len, 0, true);
        env->gpr[3] = H_P2;
        return;
    }

    len = sizeof(env->gpr);
    assert(len == sizeof(regs->gpr));
    memcpy(regs->gpr, l2_state.gpr, len);

    regs->link = l2_state.lr;
    regs->ctr = l2_state.ctr;
    regs->xer = l2_state.xer;
    regs->ccr = l2_state.cr;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_SYSCALL) {
        regs->nip = l2_state.srr0;
        regs->msr = l2_state.srr1 & env->msr_mask;
    } else {
        regs->nip = hsrr0;
        regs->msr = hsrr1 & env->msr_mask;
    }

    /* Is it okay to specify write length larger than actual data written? */
    address_space_unmap(CPU(cpu)->as, regs, len, len, true);
}
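/*
 * Design note on spapr_exit_nested_hv() above: interrupts delivered via
 * SRR0/1 (machine check, system reset, syscall) report the L2 nip/msr from
 * SRR0/1; all other exits are hypervisor interrupts and use the HSRR0/1
 * values captured before the state switch back to L1.
 */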
void spapr_exit_nested(PowerPCCPU *cpu, int excp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    assert(spapr_cpu->in_nested);
    if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) {
        spapr_exit_nested_hv(cpu, excp);
    } else {
        g_assert_not_reached();
    }
}
static
SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr,
                                                     target_ulong guestid)
{
    SpaprMachineStateNestedGuest *guest;

    guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid));
    return guest;
}
static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];

    if (flags) { /* don't handle any flags capabilities for now */
        return H_PARAMETER;
    }

    /* P10 capabilities */
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
                         spapr->max_compat_pvr)) {
        env->gpr[4] |= H_GUEST_CAPABILITIES_P10_MODE;
    }

    /* P9 capabilities */
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                         spapr->max_compat_pvr)) {
        env->gpr[4] |= H_GUEST_CAPABILITIES_P9_MODE;
    }

    return H_SUCCESS;
}
static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong capabilities = args[1];
    env->gpr[4] = 0;

    if (flags) { /* don't handle any flags capabilities for now */
        return H_PARAMETER;
    }

    if (capabilities & H_GUEST_CAPABILITIES_COPY_MEM) {
        env->gpr[4] = 1;
        return H_P2; /* isn't supported */
    }

    /*
     * If there are no capabilities configured, set R5 to the index of
     * the first supported Power Processor Mode
     */
    if (!capabilities) {
        env->gpr[4] = 1;

        /* set R5 to the first supported Power Processor Mode */
        if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
                             spapr->max_compat_pvr)) {
            env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP;
        } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                                    spapr->max_compat_pvr)) {
            env->gpr[5] = H_GUEST_CAP_P9_MODE_BMAP;
        }

        return H_P2;
    }

    /*
     * If an invalid capability is set, R5 should contain the index of the
     * invalid capability bit
     */
    if (capabilities & ~H_GUEST_CAP_VALID_MASK) {
        env->gpr[4] = 1;

        /* Set R5 to the index of the invalid capability */
        env->gpr[5] = 63 - ctz64(capabilities);

        return H_P2;
    }

    if (!spapr->nested.capabilities_set) {
        spapr->nested.capabilities_set = true;
        spapr->nested.pvr_base = env->spr[SPR_PVR];
        return H_SUCCESS;
    } else {
        return H_STATE;
    }
}
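/*
 * Caller-visible contract (as implemented above): a failed check returns
 * H_P2 with r4 set to 1 and r5 set to a capability bit index, either the
 * first supported processor mode (when none was requested) or the invalid
 * bit (when an unsupported one was). The first successful call latches the
 * base PVR; any later call fails with H_STATE.
 */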
static void
destroy_guest_helper(gpointer value)
{
    struct SpaprMachineStateNestedGuest *guest = value;
    g_free(guest->vcpus);
    g_free(guest);
}
static target_ulong h_guest_create(PowerPCCPU *cpu,
                                   SpaprMachineState *spapr,
                                   target_ulong opcode,
                                   target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong continue_token = args[1];
    uint64_t guestid;
    int nguests = 0;
    struct SpaprMachineStateNestedGuest *guest;

    if (flags) { /* don't handle any flags for now */
        return H_UNSUPPORTED_FLAG;
    }

    if (continue_token != -1) {
        return H_P2;
    }

    if (!spapr->nested.capabilities_set) {
        return H_STATE;
    }

    if (!spapr->nested.guests) {
        spapr->nested.guests = g_hash_table_new_full(NULL,
                                                     NULL,
                                                     NULL,
                                                     destroy_guest_helper);
    }

    nguests = g_hash_table_size(spapr->nested.guests);

    if (nguests == PAPR_NESTED_GUEST_MAX) {
        return H_NO_MEM;
    }

    /* Look up an available guestid */
    for (guestid = 1; guestid < PAPR_NESTED_GUEST_MAX; guestid++) {
        if (!(g_hash_table_lookup(spapr->nested.guests,
                                  GINT_TO_POINTER(guestid)))) {
            break;
        }
    }

    if (guestid == PAPR_NESTED_GUEST_MAX) {
        return H_NO_MEM;
    }

    guest = g_try_new0(struct SpaprMachineStateNestedGuest, 1);
    if (!guest) {
        return H_NO_MEM;
    }

    guest->pvr_logical = spapr->nested.pvr_base;
    g_hash_table_insert(spapr->nested.guests, GINT_TO_POINTER(guestid), guest);
    env->gpr[4] = guestid;

    return H_SUCCESS;
}
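/*
 * Note: guestid 0 is never handed out (the scan above starts at 1), and the
 * newly allocated id is returned to the L1 in r4 alongside H_SUCCESS.
 */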
static target_ulong h_guest_delete(PowerPCCPU *cpu,
                                   SpaprMachineState *spapr,
                                   target_ulong opcode,
                                   target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong guestid = args[1];
    struct SpaprMachineStateNestedGuest *guest;

    /*
     * Handle the deleteAllGuests flag; if set, guestid is ignored and all
     * guests are deleted.
     */
    if (flags & ~H_GUEST_DELETE_ALL_FLAG) {
        return H_UNSUPPORTED_FLAG; /* other flag bits reserved */
    } else if (flags & H_GUEST_DELETE_ALL_FLAG) {
        g_hash_table_destroy(spapr->nested.guests);
        spapr->nested.guests = NULL; /* don't leave a dangling pointer */
        return H_SUCCESS;
    }

    guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid));
    if (!guest) {
        return H_P2;
    }

    g_hash_table_remove(spapr->nested.guests, GINT_TO_POINTER(guestid));

    return H_SUCCESS;
}
static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong guestid = args[1];
    target_ulong vcpuid = args[2];
    SpaprMachineStateNestedGuest *guest;
    SpaprMachineStateNestedGuestVcpu *vcpus, *curr_vcpu;

    if (flags) { /* don't handle any flags for now */
        return H_UNSUPPORTED_FLAG;
    }

    guest = spapr_get_nested_guest(spapr, guestid);
    if (!guest) {
        return H_P2;
    }

    if (vcpuid < guest->nr_vcpus) {
        qemu_log_mask(LOG_UNIMP, "vcpuid " TARGET_FMT_ld " already in use.",
                      vcpuid);
        return H_IN_USE;
    }
    /* linear vcpuid allocation only */
    assert(vcpuid == guest->nr_vcpus);

    if (guest->nr_vcpus >= PAPR_NESTED_GUEST_VCPU_MAX) {
        return H_P3;
    }

    vcpus = g_try_renew(struct SpaprMachineStateNestedGuestVcpu,
                        guest->vcpus,
                        guest->nr_vcpus + 1);
    if (!vcpus) {
        return H_NO_MEM;
    }
    guest->vcpus = vcpus;
    curr_vcpu = &vcpus[guest->nr_vcpus];
    memset(curr_vcpu, 0, sizeof(SpaprMachineStateNestedGuestVcpu));

    curr_vcpu->enabled = true;
    guest->nr_vcpus++;

    return H_SUCCESS;
}
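/*
 * End-to-end L1 usage sketch for the PAPR nested API registered below
 * (hypothetical helper names, for illustration only):
 *
 *     hcall(H_GUEST_GET_CAPABILITIES, 0);            // caps returned in r4
 *     hcall(H_GUEST_SET_CAPABILITIES, 0, caps);      // latches the PVR base
 *     hcall(H_GUEST_CREATE, 0, -1);                  // new guestid in r4
 *     hcall(H_GUEST_CREATE_VCPU, 0, guestid, 0);     // vcpuids 0, 1, 2, ...
 *     hcall(H_GUEST_DELETE, 0, guestid);             // tear down
 *
 * vcpuid allocation is strictly linear: creating vcpuid N requires exactly
 * N vCPUs to exist already, which is what the assert above enforces.
 */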
void spapr_register_nested_hv(void)
{
    spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl);
    spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested);
    spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate);
    spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest);
}

void spapr_unregister_nested_hv(void)
{
    spapr_unregister_hypercall(KVMPPC_H_SET_PARTITION_TABLE);
    spapr_unregister_hypercall(KVMPPC_H_ENTER_NESTED);
    spapr_unregister_hypercall(KVMPPC_H_TLB_INVALIDATE);
    spapr_unregister_hypercall(KVMPPC_H_COPY_TOFROM_GUEST);
}

void spapr_register_nested_papr(void)
{
    spapr_register_hypercall(H_GUEST_GET_CAPABILITIES,
                             h_guest_get_capabilities);
    spapr_register_hypercall(H_GUEST_SET_CAPABILITIES,
                             h_guest_set_capabilities);
    spapr_register_hypercall(H_GUEST_CREATE, h_guest_create);
    spapr_register_hypercall(H_GUEST_DELETE, h_guest_delete);
    spapr_register_hypercall(H_GUEST_CREATE_VCPU, h_guest_create_vcpu);
}

void spapr_unregister_nested_papr(void)
{
    spapr_unregister_hypercall(H_GUEST_GET_CAPABILITIES);
    spapr_unregister_hypercall(H_GUEST_SET_CAPABILITIES);
    spapr_unregister_hypercall(H_GUEST_CREATE);
    spapr_unregister_hypercall(H_GUEST_DELETE);
    spapr_unregister_hypercall(H_GUEST_CREATE_VCPU);
}
#else
void spapr_exit_nested(PowerPCCPU *cpu, int excp)
{
    g_assert_not_reached();
}

void spapr_register_nested_hv(void)
{
    /* DO NOTHING */
}

void spapr_unregister_nested_hv(void)
{
    /* DO NOTHING */
}

bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
                              target_ulong lpid, ppc_v3_pate_t *entry)
{
    return false;
}

void spapr_register_nested_papr(void)
{
    /* DO NOTHING */
}

void spapr_unregister_nested_papr(void)
{
    /* DO NOTHING */
}

#endif