1 #include "qemu/osdep.h"
2 #include "qemu/cutils.h"
3 #include "qapi/error.h"
4 #include "sysemu/hw_accel.h"
5 #include "sysemu/runstate.h"
6 #include "sysemu/tcg.h"
8 #include "qemu/main-loop.h"
9 #include "qemu/module.h"
10 #include "qemu/error-report.h"
11 #include "exec/exec-all.h"
12 #include "exec/tb-flush.h"
13 #include "helper_regs.h"
14 #include "hw/ppc/ppc.h"
15 #include "hw/ppc/spapr.h"
16 #include "hw/ppc/spapr_cpu_core.h"
17 #include "hw/ppc/spapr_nested.h"
18 #include "mmu-hash64.h"
19 #include "cpu-models.h"
22 #include "hw/ppc/fdt.h"
23 #include "hw/ppc/spapr_ovec.h"
24 #include "hw/ppc/spapr_numa.h"
25 #include "mmu-book3s-v3.h"
26 #include "hw/mem/memory-device.h"
bool is_ram_address(SpaprMachineState *spapr, hwaddr addr)
{
    MachineState *machine = MACHINE(spapr);
    DeviceMemoryState *dms = machine->device_memory;

    if (addr < machine->ram_size) {
        return true;
    }
    if (dms && (addr >= dms->base)
        && ((addr - dms->base) < memory_region_size(&dms->mr))) {
        return true;
    }

    return false;
}
/* Convert a return code from the KVM ioctl()s implementing resize HPT
 * into a PAPR hypercall return code */
static target_ulong resize_hpt_convert_rc(int ret)
{
    if (ret >= 100000) {
        return H_LONG_BUSY_ORDER_100_SEC;
    } else if (ret >= 10000) {
        return H_LONG_BUSY_ORDER_10_SEC;
    } else if (ret >= 1000) {
        return H_LONG_BUSY_ORDER_1_SEC;
    } else if (ret >= 100) {
        return H_LONG_BUSY_ORDER_100_MSEC;
    } else if (ret >= 10) {
        return H_LONG_BUSY_ORDER_10_MSEC;
    } else if (ret > 0) {
        return H_LONG_BUSY_ORDER_1_MSEC;
    }

    /* Negative errno values from the ioctl map to PAPR error codes */
    switch (ret) {
    case 0:       return H_SUCCESS;
    case -EPERM:  return H_AUTHORITY;
    case -EINVAL: return H_PARAMETER;
    case -ENXIO:  return H_CLOSED;
    case -ENOSPC: return H_PTEG_FULL;
    case -EBUSY:  return H_BUSY;
    case -ENOMEM: return H_NO_MEM;
    default:      return H_HARDWARE;
    }
}
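
/*
 * H_RESIZE_HPT_PREPARE: the guest asks us to start preparing a new hashed
 * page table of 2^shift bytes; H_LONG_BUSY_* return values tell it to retry
 * while the (potentially slow) allocation proceeds in the background.
 */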
static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    target_ulong flags = args[0];
    int shift = args[1];
    uint64_t current_ram_size;
    int rc;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        return H_AUTHORITY;
    }

    if (!spapr->htab_shift) {
        /* Radix guest, no HPT */
        return H_NOT_AVAILABLE;
    }

    trace_spapr_h_resize_hpt_prepare(flags, shift);

    if (shift && ((shift < 18) || (shift > 46))) {
        return H_PARAMETER;
    }

    current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();

    /* We only allow the guest to allocate an HPT one order above what
     * we'd normally give them (to stop a small guest claiming a huge
     * chunk of resources in the HPT) */
    if (shift > (spapr_hpt_shift_for_ramsize(current_ram_size) + 1)) {
        return H_RESOURCE;
    }

    if (kvm_enabled()) {
        rc = kvmppc_resize_hpt_prepare(cpu, flags, shift);
        return resize_hpt_convert_rc(rc);
    }

    return softmmu_resize_hpt_prepare(cpu, spapr, shift);
}
static void do_push_sregs_to_kvm_pr(CPUState *cs, run_on_cpu_data data)
{
    int ret;

    cpu_synchronize_state(cs);

    ret = kvmppc_put_books_sregs(POWERPC_CPU(cs));
    if (ret < 0) {
        error_report("failed to push sregs to KVM: %s", strerror(-ret));
        exit(1);
    }
}
void push_sregs_to_kvm_pr(SpaprMachineState *spapr)
{
    CPUState *cs;

    /*
     * This is a hack for the benefit of KVM PR - it abuses the SDR1
     * slot in kvm_sregs to communicate the userspace address of the
     * HPT
     */
    if (!kvm_enabled() || !spapr->htab) {
        return;
    }

    CPU_FOREACH(cs) {
        run_on_cpu(cs, do_push_sregs_to_kvm_pr, RUN_ON_CPU_NULL);
    }
}
static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong shift = args[1];
    int rc;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        return H_AUTHORITY;
    }

    if (!spapr->htab_shift) {
        /* Radix guest, no HPT */
        return H_NOT_AVAILABLE;
    }

    trace_spapr_h_resize_hpt_commit(flags, shift);

    if (kvm_enabled()) {
        rc = kvmppc_resize_hpt_commit(cpu, flags, shift);
        rc = resize_hpt_convert_rc(rc);
        if (rc == H_SUCCESS) {
            /* Need to set the new htab_shift in the machine state */
            spapr->htab_shift = shift;
        }
        return rc;
    }

    return softmmu_resize_hpt_commit(cpu, spapr, flags, shift);
}
static target_ulong h_set_sprg0(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}
static target_ulong h_set_dabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!ppc_has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (ppc_has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}
static target_ulong h_set_xdabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!ppc_has_spr(cpu, SPR_DABR) || !ppc_has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}
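
/*
 * H_PAGE_INIT: zero and/or copy a single guest page, optionally followed by
 * icache synchronization so the page can safely hold instructions.
 */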
static target_ulong h_page_init(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, true);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, false);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len);           /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}
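
/*
 * H_REGISTER_VPA flag values: the high-order bits of args[0] select which
 * per-vCPU area (VPA, dispatch trace log or SLB shadow buffer) is being
 * registered or deregistered.
 */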
#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL
static target_ulong register_vpa(PowerPCCPU *cpu, target_ulong vpa)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    spapr_cpu->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}
static target_ulong deregister_vpa(PowerPCCPU *cpu, target_ulong vpa)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (spapr_cpu->dtl_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->vpa_addr = 0;
    return H_SUCCESS;
}
static target_ulong register_slb_shadow(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    target_ulong size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(CPU(cpu)->as, addr + 0x4);

    /* The SLB shadow buffer must not cross a page boundary */
    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!spapr_cpu->vpa_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->slb_shadow_addr = addr;
    spapr_cpu->slb_shadow_size = size;

    return H_SUCCESS;
}
static target_ulong deregister_slb_shadow(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    spapr_cpu->slb_shadow_addr = 0;
    spapr_cpu->slb_shadow_size = 0;

    return H_SUCCESS;
}
static target_ulong register_dtl(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    target_ulong size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(CPU(cpu)->as, addr + 0x4);

    if (!spapr_cpu->vpa_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->dtl_addr = addr;
    spapr_cpu->dtl_size = size;

    return H_SUCCESS;
}
static target_ulong deregister_dtl(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    spapr_cpu->dtl_addr = 0;
    spapr_cpu->dtl_size = 0;

    return H_SUCCESS;
}
static target_ulong h_register_vpa(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    PowerPCCPU *tcpu;

    tcpu = spapr_find_cpu(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tcpu, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tcpu, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tcpu, vpa);
        break;
    }

    return ret;
}
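
/*
 * H_CEDE: the vCPU gives up the processor until it has work to do again,
 * i.e. until an interrupt is pending or another vCPU prods it with H_PROD.
 */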
static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    ppc_maybe_interrupt(env);

    if (spapr_cpu->prod) {
        spapr_cpu->prod = false;
        return H_SUCCESS;
    }

    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
        ppc_maybe_interrupt(env);
    }

    return H_SUCCESS;
}
/*
 * Confer to self, aka join. Cede could use the same pattern as well, if
 * EXCP_HLT can be changed to EXCP_HALTED.
 */
static target_ulong h_confer_self(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->prod) {
        spapr_cpu->prod = false;
        return H_SUCCESS;
    }
    cs->halted = 1;
    cs->exception_index = EXCP_HALTED;
    cs->exit_request = 1;
    ppc_maybe_interrupt(&cpu->env);

    return H_SUCCESS;
}
static target_ulong h_join(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs;
    bool last_unjoined = true;

    if (env->msr & (1ULL << MSR_EE)) {
        return H_BAD_MODE;
    }

    /*
     * Must not join the last CPU running. Interestingly, no such restriction
     * for H_CONFER-to-self, but that is probably not intended to be used
     * when H_JOIN is available.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *c = POWERPC_CPU(cs);
        CPUPPCState *e = &c->env;

        if (c == cpu) {
            continue;
        }

        /* Don't have a way to indicate joined, so use halted && MSR[EE]=0 */
        if (!cs->halted || (e->msr & (1ULL << MSR_EE))) {
            last_unjoined = false;
            break;
        }
    }
    if (last_unjoined) {
        return H_CONTINUE;
    }

    return h_confer_self(cpu);
}
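
/*
 * H_CONFER: yield the processor, either to all other vCPUs (target == -1)
 * or to a specific target vCPU identified by its dispatch counter.
 */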
static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    uint32_t dispatch = args[1];
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu;

    /*
     * -1 means confer to all other CPUs without dispatch counter check,
     *  otherwise it's a targeted confer.
     */
    if (target != -1) {
        PowerPCCPU *target_cpu = spapr_find_cpu(target);
        uint32_t target_dispatch;

        if (!target_cpu) {
            return H_PARAMETER;
        }

        /*
         * target == self is a special case, we wait until prodded, without
         * dispatch counter check.
         */
        if (cpu == target_cpu) {
            return h_confer_self(cpu);
        }

        spapr_cpu = spapr_cpu_state(target_cpu);
        if (!spapr_cpu->vpa_addr || ((dispatch & 1) == 0)) {
            return H_SUCCESS;
        }

        target_dispatch = ldl_be_phys(cs->as,
                                  spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        if (target_dispatch != dispatch) {
            return H_SUCCESS;
        }

        /*
         * The targeted confer does not do anything special beyond yielding
         * the current vCPU, but even this should be better than nothing.
         * At least for single-threaded tcg, it gives the target a chance to
         * run before we run again. Multi-threaded tcg does not really do
         * anything with EXCP_YIELD yet.
         */
    }

    cs->exception_index = EXCP_YIELD;
    cs->exit_request = 1;

    return H_SUCCESS;
}
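
/* H_PROD: wake up a target vCPU that ceded or conferred, marking it prodded. */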
static target_ulong h_prod(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    PowerPCCPU *tcpu;
    CPUState *cs;
    SpaprCpuState *spapr_cpu;

    tcpu = spapr_find_cpu(target);
    cs = CPU(tcpu);
    if (!cs) {
        return H_PARAMETER;
    }

    spapr_cpu = spapr_cpu_state(tcpu);
    spapr_cpu->prod = true;
    cs->halted = 0;
    ppc_maybe_interrupt(&cpu->env);
    qemu_cpu_kick(cs);

    return H_SUCCESS;
}
static target_ulong h_rtas(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}
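
/*
 * H_LOGICAL_CI_LOAD/STORE and friends: debugger-style accesses of 1, 2, 4 or
 * 8 bytes at guest real (logical) addresses.
 */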
static target_ulong h_logical_load(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
static target_ulong h_logical_store(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
static target_ulong h_logical_memop(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    target_ulong tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        /* Overlapping regions: copy backwards from the end */
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0: tmp = ldub_phys(cs->as, src); break;
        case 1: tmp = lduw_phys(cs->as, src); break;
        case 2: tmp = ldl_phys(cs->as, src); break;
        case 3: tmp = ldq_phys(cs->as, src); break;
        default: return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0: stb_phys(cs->as, dst, tmp); break;
        case 1: stw_phys(cs->as, dst, tmp); break;
        case 2: stl_phys(cs->as, dst, tmp); break;
        case 3: stq_phys(cs->as, dst, tmp); break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}
static target_ulong h_logical_icbi(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}
static target_ulong h_set_mode_resource_set_ciabr(PowerPCCPU *cpu,
                                                  SpaprMachineState *spapr,
                                                  target_ulong mflags,
                                                  target_ulong value1,
                                                  target_ulong value2)
{
    CPUPPCState *env = &cpu->env;

    assert(tcg_enabled()); /* KVM will have handled this */

    if (mflags) {
        return H_UNSUPPORTED_FLAG;
    }
    if ((value1 & PPC_BITMASK(62, 63)) == 0x3) {
        return H_P3;
    }

    ppc_store_ciabr(env, value1);

    return H_SUCCESS;
}
static target_ulong h_set_mode_resource_set_dawr0(PowerPCCPU *cpu,
                                                  SpaprMachineState *spapr,
                                                  target_ulong mflags,
                                                  target_ulong value1,
                                                  target_ulong value2)
{
    CPUPPCState *env = &cpu->env;

    assert(tcg_enabled()); /* KVM will have handled this */

    if (mflags) {
        return H_UNSUPPORTED_FLAG;
    }
    if (value2 & PPC_BIT(61)) {
        return H_P4;
    }

    ppc_store_dawr0(env, value1);
    ppc_store_dawrx0(env, value2);

    return H_SUCCESS;
}
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        spapr_set_all_lpcrs(0, LPCR_ILE);
        spapr_pci_switch_vga(spapr, true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        spapr_set_all_lpcrs(LPCR_ILE, LPCR_ILE);
        spapr_pci_switch_vga(spapr, false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}
static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        SpaprMachineState *spapr,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    /*
     * AIL-1 is not architected, and AIL-2 is not supported by QEMU spapr.
     * It is supported for faithful emulation of bare metal systems, but for
     * compatibility concerns we leave it out of the pseries machine.
     */
    if (mflags != 0 && mflags != 3) {
        return H_UNSUPPORTED_FLAG;
    }

    if (mflags == 3) {
        if (!spapr_get_cap(spapr, SPAPR_CAP_AIL_MODE_3)) {
            return H_UNSUPPORTED_FLAG;
        }
    }

    spapr_set_all_lpcrs(mflags << LPCR_AIL_SHIFT, LPCR_AIL);

    return H_SUCCESS;
}
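
/* H_SET_MODE: dispatch on the requested resource (CIABR, DAWR0, interrupt
 * endianness, or the AIL address translation mode). */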
static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_SET_CIABR:
        ret = h_set_mode_resource_set_ciabr(cpu, spapr, args[0], args[2],
                                            args[3]);
        break;
    case H_SET_MODE_RESOURCE_SET_DAWR0:
        ret = h_set_mode_resource_set_dawr0(cpu, spapr, args[0], args[2],
                                            args[3]);
        break;
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, spapr, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, spapr, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}
static target_ulong h_clean_slb(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n",
                  opcode, " (H_CLEAN_SLB)");
    return H_FUNCTION;
}

static target_ulong h_invalidate_pid(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                     target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n",
                  opcode, " (H_INVALIDATE_PID)");
    return H_FUNCTION;
}
static void spapr_check_setup_free_hpt(SpaprMachineState *spapr,
                                       uint64_t patbe_old, uint64_t patbe_new)
{
    /*
     * HASH->HASH || RADIX->RADIX || NOTHING->RADIX : Do Nothing
     * HASH->RADIX                                  : Free HPT
     * RADIX->HASH                                  : Allocate HPT
     * NOTHING->HASH                                : Allocate HPT
     * Note: NOTHING implies the case where we said the guest could choose
     *       later and so assumed radix and now it's called H_REG_PROC_TBL
     */

    if ((patbe_old & PATE1_GR) == (patbe_new & PATE1_GR)) {
        /* We assume RADIX, so this catches all the "Do Nothing" cases */
    } else if (!(patbe_old & PATE1_GR)) {
        /* HASH->RADIX : Free HPT */
        spapr_free_hpt(spapr);
    } else if (!(patbe_new & PATE1_GR)) {
        /* RADIX->HASH || NOTHING->HASH : Allocate HPT */
        spapr_setup_hpt(spapr);
    }
}
#define FLAGS_MASK              0x01FULL
#define FLAG_MODIFY             0x10
#define FLAG_REGISTER           0x08
#define FLAG_RADIX              0x04
#define FLAG_HASH_PROC_TBL      0x02
#define FLAG_GTSE               0x01
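
/*
 * H_REGISTER_PROC_TBL: register (or deregister) the guest's process table
 * and select radix or hash translation by updating the partition table
 * entry and the relevant LPCR bits on all vCPUs.
 */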
static target_ulong h_register_process_table(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong proc_tbl = args[1];
    target_ulong page_size = args[2];
    target_ulong table_size = args[3];
    target_ulong update_lpcr = 0;
    target_ulong table_byte_size;
    uint64_t cproc;

    if (flags & ~FLAGS_MASK) { /* Check no reserved bits are set */
        return H_PARAMETER;
    }
    if (flags & FLAG_MODIFY) {
        if (flags & FLAG_REGISTER) {
            /* Check process table alignment */
            table_byte_size = 1ULL << (table_size + 12);
            if (proc_tbl & (table_byte_size - 1)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                    "%s: process table not properly aligned: proc_tbl 0x"
                    TARGET_FMT_lx" proc_tbl_size 0x"TARGET_FMT_lx"\n",
                    __func__, proc_tbl, table_byte_size);
            }
            if (flags & FLAG_RADIX) { /* Register new RADIX process table */
                if (proc_tbl & 0xfff || proc_tbl >> 60) {
                    return H_P2;
                } else if (page_size) {
                    return H_P3;
                } else if (table_size > 24) {
                    return H_P4;
                }
                cproc = PATE1_GR | proc_tbl | table_size;
            } else { /* Register new HPT process table */
                if (flags & FLAG_HASH_PROC_TBL) { /* Hash with Segment Tables */
                    /* TODO - Not Supported */
                    /* Technically caused by flag bits => H_PARAMETER */
                    return H_PARAMETER;
                } else { /* Hash with SLB */
                    if (proc_tbl >> 38) {
                        return H_P2;
                    } else if (page_size & ~0x7) {
                        return H_P3;
                    } else if (table_size > 24) {
                        return H_P4;
                    }
                }
                cproc = (proc_tbl << 25) | page_size << 5 | table_size;
            }

        } else { /* Deregister current process table */
            /*
             * Set to benign value: (current GR) | 0. This allows
             * deregistration in KVM to succeed even if the radix bit
             * in flags doesn't match the radix bit in the old PATE.
             */
            cproc = spapr->patb_entry & PATE1_GR;
        }
    } else { /* Maintain current registration */
        if (!(flags & FLAG_RADIX) != !(spapr->patb_entry & PATE1_GR)) {
            /* Technically caused by flag bits => H_PARAMETER */
            return H_PARAMETER; /* Existing Process Table Mismatch */
        }
        cproc = spapr->patb_entry;
    }

    /* Check if we need to setup OR free the hpt */
    spapr_check_setup_free_hpt(spapr, spapr->patb_entry, cproc);

    spapr->patb_entry = cproc; /* Save new process table */

    /* Update the UPRT, HR and GTSE bits in the LPCR for all cpus */
    if (flags & FLAG_RADIX)     /* Radix must use process tables, also set HR */
        update_lpcr |= (LPCR_UPRT | LPCR_HR);
    else if (flags & FLAG_HASH_PROC_TBL) /* Hash with process tables */
        update_lpcr |= LPCR_UPRT;
    if (flags & FLAG_GTSE)      /* Guest translation shootdown enable */
        update_lpcr |= LPCR_GTSE;

    spapr_set_all_lpcrs(update_lpcr, LPCR_UPRT | LPCR_HR | LPCR_GTSE);

    if (kvm_enabled()) {
        return kvmppc_configure_v3_mmu(cpu, flags & FLAG_RADIX,
                                       flags & FLAG_GTSE, cproc);
    }
    return H_SUCCESS;
}
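
/* H_SIGNAL_SYS_RESET: deliver a system reset interrupt to one vCPU, to all
 * vCPUs, or to all vCPUs except the caller. */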
#define H_SIGNAL_SYS_RESET_ALL         -1
#define H_SIGNAL_SYS_RESET_ALLBUTSELF  -2
static target_ulong h_signal_sys_reset(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    CPUState *cs;

    if (target < 0) {
        /* Broadcast */
        if (target < H_SIGNAL_SYS_RESET_ALLBUTSELF) {
            return H_PARAMETER;
        }

        CPU_FOREACH(cs) {
            PowerPCCPU *c = POWERPC_CPU(cs);

            if (target == H_SIGNAL_SYS_RESET_ALLBUTSELF) {
                if (c == cpu) {
                    continue;
                }
            }
            run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
        }
    } else {
        /* Unicast */
        cs = CPU(spapr_find_cpu(target));
        if (!cs) {
            return H_PARAMETER;
        }
        run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
    }

    return H_SUCCESS;
}
/* Returns either a logical PVR or zero if none was found */
static uint32_t cas_check_pvr(PowerPCCPU *cpu, uint32_t max_compat,
                              target_ulong *addr, bool *raw_mode_supported)
{
    bool explicit_match = false; /* Matched the CPU's real PVR */
    uint32_t best_compat = 0;
    int i;

    /*
     * We scan the supplied table of PVRs looking for two things
     *   1. Is our real CPU PVR in the list?
     *   2. What's the "best" listed logical PVR
     */
    for (i = 0; i < 512; ++i) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, *addr);
        pvr = ldl_be_phys(&address_space_memory, *addr + 4);
        *addr += 8;

        if (~pvr_mask & pvr) {
            break; /* Terminator record */
        }

        if ((cpu->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask)) {
            explicit_match = true;
        } else {
            if (ppc_check_compat(cpu, pvr, best_compat, max_compat)) {
                best_compat = pvr;
            }
        }
    }

    *raw_mode_supported = explicit_match;

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu->compat_pvr, explicit_match, best_compat);

    return best_compat;
}
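
/*
 * ibm,client-architecture-support (CAS): negotiate the compatibility PVR,
 * MMU mode (hash vs radix), interrupt controller (XICS vs XIVE) and other
 * option vector bits with the guest, then rebuild the device tree that is
 * handed back to it.
 */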
target_ulong do_client_architecture_support(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong vec,
                                            target_ulong fdt_bufsize)
{
    target_ulong ov_table; /* Working address in data buffer */
    uint32_t cas_pvr;
    SpaprOptionVector *ov1_guest, *ov5_guest;
    bool guest_radix;
    bool raw_mode_supported = false;
    bool guest_xive;
    CPUState *cs;
    void *fdt;
    uint32_t max_compat = spapr->max_compat_pvr;

    /* CAS is supposed to be called early when only the boot vCPU is active. */
    CPU_FOREACH(cs) {
        if (cs == CPU(cpu)) {
            continue;
        }
        if (!cs->halted) {
            warn_report("guest has multiple active vCPUs at CAS, which is not allowed");
            return H_MULTI_THREADS_ACTIVE;
        }
    }

    cas_pvr = cas_check_pvr(cpu, max_compat, &vec, &raw_mode_supported);
    if (!cas_pvr && (!raw_mode_supported || max_compat)) {
        /*
         * We couldn't find a suitable compatibility mode, and either
         * the guest doesn't support "raw" mode for this CPU, or "raw"
         * mode is disabled because a maximum compat mode is set.
         */
        error_report("Couldn't negotiate a suitable PVR during CAS");
        return H_HARDWARE;
    }

    /* Update CPUs */
    if (cpu->compat_pvr != cas_pvr) {
        Error *local_err = NULL;

        if (ppc_set_compat_all(cas_pvr, &local_err) < 0) {
            /* We fail to set compat mode (likely because running with KVM PR),
             * but maybe we can fallback to raw mode if the guest supports it.
             */
            if (!raw_mode_supported) {
                error_report_err(local_err);
                return H_HARDWARE;
            }
            error_free(local_err);
        }
    }

    /* For the future use: here @ov_table points to the first option vector */
    ov_table = vec;

    ov1_guest = spapr_ovec_parse_vector(ov_table, 1);
    if (!ov1_guest) {
        warn_report("guest didn't provide option vector 1");
        return H_PARAMETER;
    }
    ov5_guest = spapr_ovec_parse_vector(ov_table, 5);
    if (!ov5_guest) {
        spapr_ovec_cleanup(ov1_guest);
        warn_report("guest didn't provide option vector 5");
        return H_PARAMETER;
    }
    if (spapr_ovec_test(ov5_guest, OV5_MMU_BOTH)) {
        error_report("guest requested hash and radix MMU, which is invalid.");
        exit(EXIT_FAILURE);
    }
    if (spapr_ovec_test(ov5_guest, OV5_XIVE_BOTH)) {
        error_report("guest requested an invalid interrupt mode");
        exit(EXIT_FAILURE);
    }

    guest_radix = spapr_ovec_test(ov5_guest, OV5_MMU_RADIX_300);

    guest_xive = spapr_ovec_test(ov5_guest, OV5_XIVE_EXPLOIT);

    /*
     * HPT resizing is a bit of a special case, because when enabled
     * we assume an HPT guest will support it until it says it
     * doesn't, instead of assuming it won't support it until it says
     * it does. Strictly speaking that approach could break for
     * guests which don't make a CAS call, but those are so old we
     * don't care about them. Without that assumption we'd have to
     * make at least a temporary allocation of an HPT sized for max
     * memory, which could be impossibly difficult under KVM HV if
     * maxram is large.
     */
    if (!guest_radix && !spapr_ovec_test(ov5_guest, OV5_HPT_RESIZE)) {
        int maxshift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);

        if (spapr->resize_hpt == SPAPR_RESIZE_HPT_REQUIRED) {
            error_report(
                "h_client_architecture_support: Guest doesn't support HPT resizing, but resize-hpt=required");
            exit(1);
        }

        if (spapr->htab_shift < maxshift) {
            /* Guest doesn't know about HPT resizing, so we
             * pre-emptively resize for the maximum permitted RAM. At
             * the point this is called, nothing should have been
             * entered into the existing HPT */
            spapr_reallocate_hpt(spapr, maxshift, &error_fatal);
            push_sregs_to_kvm_pr(spapr);
        }
    }

    /* NOTE: there are actually a number of ov5 bits where input from the
     * guest is always zero, and the platform/QEMU enables them independently
     * of guest input. To model these properly we'd want some sort of mask,
     * but since they only currently apply to memory migration as defined
     * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need
     * to worry about this for now.
     */

    /* full range of negotiated ov5 capabilities */
    spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest);
    spapr_ovec_cleanup(ov5_guest);

    spapr_check_mmu_mode(guest_radix);

    spapr->cas_pre_isa3_guest = !spapr_ovec_test(ov1_guest, OV1_PPC_3_00);
    spapr_ovec_cleanup(ov1_guest);

    /*
     * Check for NUMA affinity conditions now that we know which NUMA
     * affinity the guest will use.
     */
    spapr_numa_associativity_check(spapr);

    /*
     * Ensure the guest asks for an interrupt mode we support;
     * otherwise terminate the boot.
     */
    if (guest_xive) {
        if (!spapr->irq->xive) {
            error_report(
                "Guest requested unavailable interrupt mode (XIVE), try the ic-mode=xive or ic-mode=dual machine property");
            exit(EXIT_FAILURE);
        }
    } else {
        if (!spapr->irq->xics) {
            error_report(
                "Guest requested unavailable interrupt mode (XICS), either don't set the ic-mode machine property or try ic-mode=xics or ic-mode=dual");
            exit(EXIT_FAILURE);
        }
    }

    spapr_irq_update_active_intc(spapr);

    /*
     * Process all pending hot-plug/unplug requests now. An updated full
     * rendered FDT will be returned to the guest.
     */
    spapr_drc_reset_all(spapr);
    spapr_clear_pending_hotplug_events(spapr);

    /*
     * If spapr_machine_reset() did not set up a HPT but one is necessary
     * (because the guest isn't going to use radix) then set it up here.
     */
    if ((spapr->patb_entry & PATE1_GR) && !guest_radix) {
        /* legacy hash or new hash: */
        spapr_setup_hpt(spapr);
    }

    fdt = spapr_build_fdt(spapr, spapr->vof != NULL, fdt_bufsize);
    g_free(spapr->fdt_blob);
    spapr->fdt_size = fdt_totalsize(fdt);
    spapr->fdt_initial_size = spapr->fdt_size;
    spapr->fdt_blob = fdt;

    /*
     * Set the machine->fdt pointer again since we just freed
     * it above (by freeing spapr->fdt_blob). We set this
     * pointer to enable support for the 'dumpdtb' QMP/HMP
     * command.
     */
    MACHINE(spapr)->fdt = fdt;

    return H_SUCCESS;
}
static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
                                                  SpaprMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong vec = ppc64_phys_to_real(args[0]);
    target_ulong fdt_buf = args[1];
    target_ulong fdt_bufsize = args[2];
    target_ulong ret;
    SpaprDeviceTreeUpdateHeader hdr = { .version_id = 1 };

    if (fdt_bufsize < sizeof(hdr)) {
        error_report("SLOF provided insufficient CAS buffer "
                     TARGET_FMT_lu " (min: %zu)", fdt_bufsize, sizeof(hdr));
        exit(EXIT_FAILURE);
    }

    fdt_bufsize -= sizeof(hdr);

    ret = do_client_architecture_support(cpu, spapr, vec, fdt_bufsize);
    if (ret == H_SUCCESS) {
        _FDT((fdt_pack(spapr->fdt_blob)));
        spapr->fdt_size = fdt_totalsize(spapr->fdt_blob);
        spapr->fdt_initial_size = spapr->fdt_size;

        cpu_physical_memory_write(fdt_buf, &hdr, sizeof(hdr));
        cpu_physical_memory_write(fdt_buf + sizeof(hdr), spapr->fdt_blob,
                                  spapr->fdt_size);
        trace_spapr_cas_continue(spapr->fdt_size + sizeof(hdr));
    }

    return ret;
}
target_ulong spapr_vof_client_architecture_support(MachineState *ms,
                                                   CPUState *cs,
                                                   target_ulong ovec_addr)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(ms);

    target_ulong ret = do_client_architecture_support(POWERPC_CPU(cs), spapr,
                                                      ovec_addr, FDT_MAX_SIZE);

    /*
     * This adds stdout and generates phandles for boottime and CAS FDTs.
     * It is alright to update the FDT here as do_client_architecture_support()
     * does not pack it.
     */
    spapr_vof_client_dt_finalize(spapr, spapr->fdt_blob);

    return ret;
}
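
/*
 * H_GET_CPU_CHARACTERISTICS: report which speculative-execution mitigations
 * (L1D flush, bounds-check barrier, indirect-branch serialisation) the guest
 * should apply, derived from the spapr cap settings.
 */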
static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu,
                                              SpaprMachineState *spapr,
                                              target_ulong opcode,
                                              target_ulong *args)
{
    uint64_t characteristics = H_CPU_CHAR_HON_BRANCH_HINTS &
                               ~H_CPU_CHAR_THR_RECONF_TRIG;
    uint64_t behaviour = H_CPU_BEHAV_FAVOUR_SECURITY;
    uint8_t safe_cache = spapr_get_cap(spapr, SPAPR_CAP_CFPC);
    uint8_t safe_bounds_check = spapr_get_cap(spapr, SPAPR_CAP_SBBC);
    uint8_t safe_indirect_branch = spapr_get_cap(spapr, SPAPR_CAP_IBS);
    uint8_t count_cache_flush_assist = spapr_get_cap(spapr,
                                                     SPAPR_CAP_CCF_ASSIST);

    switch (safe_cache) {
    case SPAPR_CAP_WORKAROUND:
        characteristics |= H_CPU_CHAR_L1D_FLUSH_ORI30;
        characteristics |= H_CPU_CHAR_L1D_FLUSH_TRIG2;
        characteristics |= H_CPU_CHAR_L1D_THREAD_PRIV;
        behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR;
        break;
    case SPAPR_CAP_FIXED:
        behaviour |= H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY;
        behaviour |= H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS;
        break;
    default: /* broken */
        assert(safe_cache == SPAPR_CAP_BROKEN);
        behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR;
        break;
    }

    switch (safe_bounds_check) {
    case SPAPR_CAP_WORKAROUND:
        characteristics |= H_CPU_CHAR_SPEC_BAR_ORI31;
        behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
        break;
    case SPAPR_CAP_FIXED:
        break;
    default: /* broken */
        assert(safe_bounds_check == SPAPR_CAP_BROKEN);
        behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
        break;
    }

    switch (safe_indirect_branch) {
    case SPAPR_CAP_FIXED_NA:
        break;
    case SPAPR_CAP_FIXED_CCD:
        characteristics |= H_CPU_CHAR_CACHE_COUNT_DIS;
        break;
    case SPAPR_CAP_FIXED_IBS:
        characteristics |= H_CPU_CHAR_BCCTRL_SERIALISED;
        break;
    case SPAPR_CAP_WORKAROUND:
        behaviour |= H_CPU_BEHAV_FLUSH_COUNT_CACHE;
        if (count_cache_flush_assist) {
            characteristics |= H_CPU_CHAR_BCCTR_FLUSH_ASSIST;
        }
        break;
    default: /* broken */
        assert(safe_indirect_branch == SPAPR_CAP_BROKEN);
        break;
    }

    args[0] = characteristics;
    args[1] = behaviour;

    return H_SUCCESS;
}
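
/* KVMPPC_H_UPDATE_DT: the boot firmware hands back a modified device tree;
 * accept it only if it passes size and consistency checks. */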
static target_ulong h_update_dt(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dt = ppc64_phys_to_real(args[0]);
    struct fdt_header hdr = { 0 };
    unsigned cb;
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    void *fdt;

    cpu_physical_memory_read(dt, &hdr, sizeof(hdr));
    cb = fdt32_to_cpu(hdr.totalsize);

    if (!smc->update_dt_enabled) {
        return H_SUCCESS;
    }

    /* Check that the fdt did not grow out of proportion */
    if (cb > spapr->fdt_initial_size * 2) {
        trace_spapr_update_dt_failed_size(spapr->fdt_initial_size, cb,
                                          fdt32_to_cpu(hdr.magic));
        return H_PARAMETER;
    }

    fdt = g_malloc0(cb);
    cpu_physical_memory_read(dt, fdt, cb);

    /* Check the fdt consistency */
    if (fdt_check_full(fdt, cb)) {
        trace_spapr_update_dt_failed_check(spapr->fdt_initial_size, cb,
                                           fdt32_to_cpu(hdr.magic));
        return H_PARAMETER;
    }

    g_free(spapr->fdt_blob);
    spapr->fdt_size = cb;
    spapr->fdt_blob = fdt;
    trace_spapr_update_dt(cb);

    return H_SUCCESS;
}
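
/*
 * Hypercall dispatch tables, split by opcode range: architected PAPR hcalls,
 * SVM (secure VM) hcalls, and QEMU/KVM-specific hcalls.
 */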
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
static spapr_hcall_fn svm_hypercall_table[(SVM_HCALL_MAX - SVM_HCALL_BASE) / 4 + 1];
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else if (opcode >= SVM_HCALL_BASE && opcode <= SVM_HCALL_MAX) {
        /* we only have SVM-related hcall numbers assigned in multiples of 4 */
        assert((opcode & 0x3) == 0);

        slot = &svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= SVM_HCALL_BASE) &&
               (opcode <= SVM_HCALL_MAX)) {
        spapr_hcall_fn fn = svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}
#ifdef CONFIG_TCG
static void hypercall_register_softmmu(void)
{
    /* DO NOTHING */
}
#else
static target_ulong h_softmmu(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    g_assert_not_reached();
}

static void hypercall_register_softmmu(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_softmmu);
    spapr_register_hypercall(H_REMOVE, h_softmmu);
    spapr_register_hypercall(H_PROTECT, h_softmmu);
    spapr_register_hypercall(H_READ, h_softmmu);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_softmmu);
}
#endif
static void hypercall_register_types(void)
{
    hypercall_register_softmmu();

    /* hcall-hpt-resize */
    spapr_register_hypercall(H_RESIZE_HPT_PREPARE, h_resize_hpt_prepare);
    spapr_register_hypercall(H_RESIZE_HPT_COMMIT, h_resize_hpt_commit);

    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);
    spapr_register_hypercall(H_CONFER, h_confer);
    spapr_register_hypercall(H_PROD, h_prod);

    spapr_register_hypercall(H_JOIN, h_join);

    spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* In Memory Table MMU h-calls */
    spapr_register_hypercall(H_CLEAN_SLB, h_clean_slb);
    spapr_register_hypercall(H_INVALIDATE_PID, h_invalidate_pid);
    spapr_register_hypercall(H_REGISTER_PROC_TBL, h_register_process_table);

    /* hcall-get-cpu-characteristics */
    spapr_register_hypercall(H_GET_CPU_CHARACTERISTICS,
                             h_get_cpu_characteristics);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);

    spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);

    spapr_register_nested();
}

type_init(hypercall_register_types)