1 #include "qemu/osdep.h"
2 #include "sysemu/sysemu.h"
4 #include "helper_regs.h"
5 #include "hw/ppc/spapr.h"
6 #include "mmu-hash64.h"
7 #include "cpu-models.h"
18 static void do_spr_sync(void *arg
)
20 struct SPRSyncState
*s
= arg
;
21 PowerPCCPU
*cpu
= POWERPC_CPU(s
->cs
);
22 CPUPPCState
*env
= &cpu
->env
;
24 cpu_synchronize_state(s
->cs
);
25 env
->spr
[s
->spr
] &= ~s
->mask
;
26 env
->spr
[s
->spr
] |= s
->value
;
29 static void set_spr(CPUState
*cs
, int spr
, target_ulong value
,
32 struct SPRSyncState s
= {
38 run_on_cpu(cs
, do_spr_sync
, &s
);
41 static inline bool valid_pte_index(CPUPPCState
*env
, target_ulong pte_index
)
44 * hash value/pteg group index is normalized by htab_mask
46 if (((pte_index
& ~7ULL) / HPTES_PER_GROUP
) & ~env
->htab_mask
) {
52 static bool is_ram_address(sPAPRMachineState
*spapr
, hwaddr addr
)
54 MachineState
*machine
= MACHINE(spapr
);
55 MemoryHotplugState
*hpms
= &spapr
->hotplug_memory
;
57 if (addr
< machine
->ram_size
) {
60 if ((addr
>= hpms
->base
)
61 && ((addr
- hpms
->base
) < memory_region_size(&hpms
->mr
))) {
68 static target_ulong
h_enter(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
69 target_ulong opcode
, target_ulong
*args
)
71 CPUPPCState
*env
= &cpu
->env
;
72 target_ulong flags
= args
[0];
73 target_ulong pte_index
= args
[1];
74 target_ulong pteh
= args
[2];
75 target_ulong ptel
= args
[3];
76 unsigned apshift
, spshift
;
81 apshift
= ppc_hash64_hpte_page_shift_noslb(cpu
, pteh
, ptel
, &spshift
);
83 /* Bad page size encoding */
87 raddr
= (ptel
& HPTE64_R_RPN
) & ~((1ULL << apshift
) - 1);
89 if (is_ram_address(spapr
, raddr
)) {
90 /* Regular RAM - should have WIMG=0010 */
91 if ((ptel
& HPTE64_R_WIMG
) != HPTE64_R_M
) {
95 /* Looks like an IO address */
96 /* FIXME: What WIMG combinations could be sensible for IO?
97 * For now we allow WIMG=010x, but are there others? */
98 /* FIXME: Should we check against registered IO addresses? */
99 if ((ptel
& (HPTE64_R_W
| HPTE64_R_I
| HPTE64_R_M
)) != HPTE64_R_I
) {
106 if (!valid_pte_index(env
, pte_index
)) {
111 if (likely((flags
& H_EXACT
) == 0)) {
113 token
= ppc_hash64_start_access(cpu
, pte_index
);
114 for (; index
< 8; index
++) {
115 if (!(ppc_hash64_load_hpte0(cpu
, token
, index
) & HPTE64_V_VALID
)) {
119 ppc_hash64_stop_access(token
);
124 token
= ppc_hash64_start_access(cpu
, pte_index
);
125 if (ppc_hash64_load_hpte0(cpu
, token
, 0) & HPTE64_V_VALID
) {
126 ppc_hash64_stop_access(token
);
129 ppc_hash64_stop_access(token
);
132 ppc_hash64_store_hpte(cpu
, pte_index
+ index
,
133 pteh
| HPTE64_V_HPTE_DIRTY
, ptel
);
135 args
[0] = pte_index
+ index
;
/*
 * Outcome of remove_hpte(), mapped to hypercall return codes by the
 * callers (h_remove / h_bulk_remove).
 * NOTE(review): only the REMOVE_NOT_FOUND line of this enum survives in
 * the extracted SOURCE; the remaining enumerators are reconstructed from
 * their uses in h_remove/h_bulk_remove — verify against repo history.
 */
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;
146 static RemoveResult
remove_hpte(PowerPCCPU
*cpu
, target_ulong ptex
,
149 target_ulong
*vp
, target_ulong
*rp
)
151 CPUPPCState
*env
= &cpu
->env
;
155 if (!valid_pte_index(env
, ptex
)) {
159 token
= ppc_hash64_start_access(cpu
, ptex
);
160 v
= ppc_hash64_load_hpte0(cpu
, token
, 0);
161 r
= ppc_hash64_load_hpte1(cpu
, token
, 0);
162 ppc_hash64_stop_access(token
);
164 if ((v
& HPTE64_V_VALID
) == 0 ||
165 ((flags
& H_AVPN
) && (v
& ~0x7fULL
) != avpn
) ||
166 ((flags
& H_ANDCOND
) && (v
& avpn
) != 0)) {
167 return REMOVE_NOT_FOUND
;
171 ppc_hash64_store_hpte(cpu
, ptex
, HPTE64_V_HPTE_DIRTY
, 0);
172 ppc_hash64_tlb_flush_hpte(cpu
, ptex
, v
, r
);
173 return REMOVE_SUCCESS
;
176 static target_ulong
h_remove(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
177 target_ulong opcode
, target_ulong
*args
)
179 target_ulong flags
= args
[0];
180 target_ulong pte_index
= args
[1];
181 target_ulong avpn
= args
[2];
184 ret
= remove_hpte(cpu
, pte_index
, avpn
, flags
,
191 case REMOVE_NOT_FOUND
:
201 g_assert_not_reached();
/*
 * Field layout of one translation-specifier doubleword of the
 * H_BULK_REMOVE argument list (type / response code / flags / PTEX),
 * per the PAPR specification.
 */
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL
222 static target_ulong
h_bulk_remove(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
223 target_ulong opcode
, target_ulong
*args
)
227 for (i
= 0; i
< H_BULK_REMOVE_MAX_BATCH
; i
++) {
228 target_ulong
*tsh
= &args
[i
*2];
229 target_ulong tsl
= args
[i
*2 + 1];
230 target_ulong v
, r
, ret
;
232 if ((*tsh
& H_BULK_REMOVE_TYPE
) == H_BULK_REMOVE_END
) {
234 } else if ((*tsh
& H_BULK_REMOVE_TYPE
) != H_BULK_REMOVE_REQUEST
) {
238 *tsh
&= H_BULK_REMOVE_PTEX
| H_BULK_REMOVE_FLAGS
;
239 *tsh
|= H_BULK_REMOVE_RESPONSE
;
241 if ((*tsh
& H_BULK_REMOVE_ANDCOND
) && (*tsh
& H_BULK_REMOVE_AVPN
)) {
242 *tsh
|= H_BULK_REMOVE_PARM
;
246 ret
= remove_hpte(cpu
, *tsh
& H_BULK_REMOVE_PTEX
, tsl
,
247 (*tsh
& H_BULK_REMOVE_FLAGS
) >> 26,
254 *tsh
|= (r
& (HPTE64_R_C
| HPTE64_R_R
)) << 43;
268 static target_ulong
h_protect(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
269 target_ulong opcode
, target_ulong
*args
)
271 CPUPPCState
*env
= &cpu
->env
;
272 target_ulong flags
= args
[0];
273 target_ulong pte_index
= args
[1];
274 target_ulong avpn
= args
[2];
278 if (!valid_pte_index(env
, pte_index
)) {
282 token
= ppc_hash64_start_access(cpu
, pte_index
);
283 v
= ppc_hash64_load_hpte0(cpu
, token
, 0);
284 r
= ppc_hash64_load_hpte1(cpu
, token
, 0);
285 ppc_hash64_stop_access(token
);
287 if ((v
& HPTE64_V_VALID
) == 0 ||
288 ((flags
& H_AVPN
) && (v
& ~0x7fULL
) != avpn
)) {
292 r
&= ~(HPTE64_R_PP0
| HPTE64_R_PP
| HPTE64_R_N
|
293 HPTE64_R_KEY_HI
| HPTE64_R_KEY_LO
);
294 r
|= (flags
<< 55) & HPTE64_R_PP0
;
295 r
|= (flags
<< 48) & HPTE64_R_KEY_HI
;
296 r
|= flags
& (HPTE64_R_PP
| HPTE64_R_N
| HPTE64_R_KEY_LO
);
297 ppc_hash64_store_hpte(cpu
, pte_index
,
298 (v
& ~HPTE64_V_VALID
) | HPTE64_V_HPTE_DIRTY
, 0);
299 ppc_hash64_tlb_flush_hpte(cpu
, pte_index
, v
, r
);
300 /* Don't need a memory barrier, due to qemu's global lock */
301 ppc_hash64_store_hpte(cpu
, pte_index
, v
| HPTE64_V_HPTE_DIRTY
, r
);
305 static target_ulong
h_read(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
306 target_ulong opcode
, target_ulong
*args
)
308 CPUPPCState
*env
= &cpu
->env
;
309 target_ulong flags
= args
[0];
310 target_ulong pte_index
= args
[1];
312 int i
, ridx
, n_entries
= 1;
314 if (!valid_pte_index(env
, pte_index
)) {
318 if (flags
& H_READ_4
) {
319 /* Clear the two low order bits */
320 pte_index
&= ~(3ULL);
324 hpte
= env
->external_htab
+ (pte_index
* HASH_PTE_SIZE_64
);
326 for (i
= 0, ridx
= 0; i
< n_entries
; i
++) {
327 args
[ridx
++] = ldq_p(hpte
);
328 args
[ridx
++] = ldq_p(hpte
+ (HASH_PTE_SIZE_64
/2));
329 hpte
+= HASH_PTE_SIZE_64
;
335 static target_ulong
h_set_dabr(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
336 target_ulong opcode
, target_ulong
*args
)
338 /* FIXME: actually implement this */
/* H_REGISTER_VPA flags values selecting which area to (de)register */
#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

/* Layout constraints of the Virtual Processor Area */
#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2
354 static target_ulong
register_vpa(CPUPPCState
*env
, target_ulong vpa
)
356 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
361 hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
365 if (vpa
% env
->dcache_line_size
) {
368 /* FIXME: bounds check the address */
370 size
= lduw_be_phys(cs
->as
, vpa
+ 0x4);
372 if (size
< VPA_MIN_SIZE
) {
376 /* VPA is not allowed to cross a page boundary */
377 if ((vpa
/ 4096) != ((vpa
+ size
- 1) / 4096)) {
383 tmp
= ldub_phys(cs
->as
, env
->vpa_addr
+ VPA_SHARED_PROC_OFFSET
);
384 tmp
|= VPA_SHARED_PROC_VAL
;
385 stb_phys(cs
->as
, env
->vpa_addr
+ VPA_SHARED_PROC_OFFSET
, tmp
);
390 static target_ulong
deregister_vpa(CPUPPCState
*env
, target_ulong vpa
)
392 if (env
->slb_shadow_addr
) {
404 static target_ulong
register_slb_shadow(CPUPPCState
*env
, target_ulong addr
)
406 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
410 hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
414 size
= ldl_be_phys(cs
->as
, addr
+ 0x4);
419 if ((addr
/ 4096) != ((addr
+ size
- 1) / 4096)) {
423 if (!env
->vpa_addr
) {
427 env
->slb_shadow_addr
= addr
;
428 env
->slb_shadow_size
= size
;
433 static target_ulong
deregister_slb_shadow(CPUPPCState
*env
, target_ulong addr
)
435 env
->slb_shadow_addr
= 0;
436 env
->slb_shadow_size
= 0;
440 static target_ulong
register_dtl(CPUPPCState
*env
, target_ulong addr
)
442 CPUState
*cs
= CPU(ppc_env_get_cpu(env
));
446 hcall_dprintf("Can't cope with DTL at logical 0\n");
450 size
= ldl_be_phys(cs
->as
, addr
+ 0x4);
456 if (!env
->vpa_addr
) {
460 env
->dtl_addr
= addr
;
461 env
->dtl_size
= size
;
466 static target_ulong
deregister_dtl(CPUPPCState
*env
, target_ulong addr
)
474 static target_ulong
h_register_vpa(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
475 target_ulong opcode
, target_ulong
*args
)
477 target_ulong flags
= args
[0];
478 target_ulong procno
= args
[1];
479 target_ulong vpa
= args
[2];
480 target_ulong ret
= H_PARAMETER
;
484 tcpu
= ppc_get_vcpu_by_dt_id(procno
);
491 case FLAGS_REGISTER_VPA
:
492 ret
= register_vpa(tenv
, vpa
);
495 case FLAGS_DEREGISTER_VPA
:
496 ret
= deregister_vpa(tenv
, vpa
);
499 case FLAGS_REGISTER_SLBSHADOW
:
500 ret
= register_slb_shadow(tenv
, vpa
);
503 case FLAGS_DEREGISTER_SLBSHADOW
:
504 ret
= deregister_slb_shadow(tenv
, vpa
);
507 case FLAGS_REGISTER_DTL
:
508 ret
= register_dtl(tenv
, vpa
);
511 case FLAGS_DEREGISTER_DTL
:
512 ret
= deregister_dtl(tenv
, vpa
);
519 static target_ulong
h_cede(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
520 target_ulong opcode
, target_ulong
*args
)
522 CPUPPCState
*env
= &cpu
->env
;
523 CPUState
*cs
= CPU(cpu
);
525 env
->msr
|= (1ULL << MSR_EE
);
526 hreg_compute_hflags(env
);
527 if (!cpu_has_work(cs
)) {
529 cs
->exception_index
= EXCP_HLT
;
530 cs
->exit_request
= 1;
535 static target_ulong
h_rtas(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
536 target_ulong opcode
, target_ulong
*args
)
538 target_ulong rtas_r3
= args
[0];
539 uint32_t token
= rtas_ld(rtas_r3
, 0);
540 uint32_t nargs
= rtas_ld(rtas_r3
, 1);
541 uint32_t nret
= rtas_ld(rtas_r3
, 2);
543 return spapr_rtas_call(cpu
, spapr
, token
, nargs
, rtas_r3
+ 12,
544 nret
, rtas_r3
+ 12 + 4*nargs
);
547 static target_ulong
h_logical_load(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
548 target_ulong opcode
, target_ulong
*args
)
550 CPUState
*cs
= CPU(cpu
);
551 target_ulong size
= args
[0];
552 target_ulong addr
= args
[1];
556 args
[0] = ldub_phys(cs
->as
, addr
);
559 args
[0] = lduw_phys(cs
->as
, addr
);
562 args
[0] = ldl_phys(cs
->as
, addr
);
565 args
[0] = ldq_phys(cs
->as
, addr
);
571 static target_ulong
h_logical_store(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
572 target_ulong opcode
, target_ulong
*args
)
574 CPUState
*cs
= CPU(cpu
);
576 target_ulong size
= args
[0];
577 target_ulong addr
= args
[1];
578 target_ulong val
= args
[2];
582 stb_phys(cs
->as
, addr
, val
);
585 stw_phys(cs
->as
, addr
, val
);
588 stl_phys(cs
->as
, addr
, val
);
591 stq_phys(cs
->as
, addr
, val
);
597 static target_ulong
h_logical_memop(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
598 target_ulong opcode
, target_ulong
*args
)
600 CPUState
*cs
= CPU(cpu
);
602 target_ulong dst
= args
[0]; /* Destination address */
603 target_ulong src
= args
[1]; /* Source address */
604 target_ulong esize
= args
[2]; /* Element size (0=1,1=2,2=4,3=8) */
605 target_ulong count
= args
[3]; /* Element count */
606 target_ulong op
= args
[4]; /* 0 = copy, 1 = invert */
608 unsigned int mask
= (1 << esize
) - 1;
609 int step
= 1 << esize
;
611 if (count
> 0x80000000) {
615 if ((dst
& mask
) || (src
& mask
) || (op
> 1)) {
619 if (dst
>= src
&& dst
< (src
+ (count
<< esize
))) {
620 dst
= dst
+ ((count
- 1) << esize
);
621 src
= src
+ ((count
- 1) << esize
);
628 tmp
= ldub_phys(cs
->as
, src
);
631 tmp
= lduw_phys(cs
->as
, src
);
634 tmp
= ldl_phys(cs
->as
, src
);
637 tmp
= ldq_phys(cs
->as
, src
);
647 stb_phys(cs
->as
, dst
, tmp
);
650 stw_phys(cs
->as
, dst
, tmp
);
653 stl_phys(cs
->as
, dst
, tmp
);
656 stq_phys(cs
->as
, dst
, tmp
);
666 static target_ulong
h_logical_icbi(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
667 target_ulong opcode
, target_ulong
*args
)
669 /* Nothing to do on emulation, KVM will trap this in the kernel */
673 static target_ulong
h_logical_dcbf(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
674 target_ulong opcode
, target_ulong
*args
)
676 /* Nothing to do on emulation, KVM will trap this in the kernel */
680 static target_ulong
h_set_mode_resource_le(PowerPCCPU
*cpu
,
695 case H_SET_MODE_ENDIAN_BIG
:
697 set_spr(cs
, SPR_LPCR
, 0, LPCR_ILE
);
699 spapr_pci_switch_vga(true);
702 case H_SET_MODE_ENDIAN_LITTLE
:
704 set_spr(cs
, SPR_LPCR
, LPCR_ILE
, LPCR_ILE
);
706 spapr_pci_switch_vga(false);
710 return H_UNSUPPORTED_FLAG
;
713 static target_ulong
h_set_mode_resource_addr_trans_mode(PowerPCCPU
*cpu
,
719 PowerPCCPUClass
*pcc
= POWERPC_CPU_GET_CLASS(cpu
);
722 if (!(pcc
->insns_flags2
& PPC2_ISA207S
)) {
733 case H_SET_MODE_ADDR_TRANS_NONE
:
736 case H_SET_MODE_ADDR_TRANS_0001_8000
:
739 case H_SET_MODE_ADDR_TRANS_C000_0000_0000_4000
:
740 prefix
= 0xC000000000004000ULL
;
743 return H_UNSUPPORTED_FLAG
;
747 CPUPPCState
*env
= &POWERPC_CPU(cpu
)->env
;
749 set_spr(cs
, SPR_LPCR
, mflags
<< LPCR_AIL_SHIFT
, LPCR_AIL
);
750 env
->excp_prefix
= prefix
;
756 static target_ulong
h_set_mode(PowerPCCPU
*cpu
, sPAPRMachineState
*spapr
,
757 target_ulong opcode
, target_ulong
*args
)
759 target_ulong resource
= args
[1];
760 target_ulong ret
= H_P2
;
763 case H_SET_MODE_RESOURCE_LE
:
764 ret
= h_set_mode_resource_le(cpu
, args
[0], args
[2], args
[3]);
766 case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE
:
767 ret
= h_set_mode_resource_addr_trans_mode(cpu
, args
[0],
776 * Return the offset to the requested option vector @vector in the
777 * option vector table @table.
779 static target_ulong
cas_get_option_vector(int vector
, target_ulong table
)
782 char nr_vectors
, nr_entries
;
788 nr_vectors
= (ldl_phys(&address_space_memory
, table
) >> 24) + 1;
789 if (!vector
|| vector
> nr_vectors
) {
792 table
++; /* skip nr option vectors */
794 for (i
= 0; i
< vector
- 1; i
++) {
795 nr_entries
= ldl_phys(&address_space_memory
, table
) >> 24;
796 table
+= nr_entries
+ 2;
803 uint32_t cpu_version
;
807 static void do_set_compat(void *arg
)
809 SetCompatState
*s
= arg
;
811 cpu_synchronize_state(CPU(s
->cpu
));
812 ppc_set_compat(s
->cpu
, s
->cpu_version
, &s
->err
);
/*
 * Map a logical PVR value to its ISA compatibility level (2050/2060/
 * 2061/2070); 0 means "not a recognized logical PVR".
 */
#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)
/* Option vector 5, byte 2: dynamic-reconfiguration memory supported */
#define OV5_DRCONF_MEMORY 0x20
823 static target_ulong
h_client_architecture_support(PowerPCCPU
*cpu_
,
824 sPAPRMachineState
*spapr
,
828 target_ulong list
= ppc64_phys_to_real(args
[0]);
829 target_ulong ov_table
, ov5
;
830 PowerPCCPUClass
*pcc_
= POWERPC_CPU_GET_CLASS(cpu_
);
832 bool cpu_match
= false, cpu_update
= true, memory_update
= false;
833 unsigned old_cpu_version
= cpu_
->cpu_version
;
834 unsigned compat_lvl
= 0, cpu_version
= 0;
835 unsigned max_lvl
= get_compat_level(cpu_
->max_compat
);
840 for (counter
= 0; counter
< 512; ++counter
) {
841 uint32_t pvr
, pvr_mask
;
843 pvr_mask
= ldl_be_phys(&address_space_memory
, list
);
845 pvr
= ldl_be_phys(&address_space_memory
, list
);
848 trace_spapr_cas_pvr_try(pvr
);
850 ((cpu_
->env
.spr
[SPR_PVR
] & pvr_mask
) == (pvr
& pvr_mask
))) {
853 } else if (pvr
== cpu_
->cpu_version
) {
855 cpu_version
= cpu_
->cpu_version
;
856 } else if (!cpu_match
) {
857 /* If it is a logical PVR, try to determine the highest level */
858 unsigned lvl
= get_compat_level(pvr
);
860 bool is205
= (pcc_
->pcr_mask
& PCR_COMPAT_2_05
) &&
861 (lvl
== get_compat_level(CPU_POWERPC_LOGICAL_2_05
));
862 bool is206
= (pcc_
->pcr_mask
& PCR_COMPAT_2_06
) &&
863 ((lvl
== get_compat_level(CPU_POWERPC_LOGICAL_2_06
)) ||
864 (lvl
== get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS
)));
866 if (is205
|| is206
) {
868 /* User did not set the level, choose the highest */
869 if (compat_lvl
<= lvl
) {
873 } else if (max_lvl
>= lvl
) {
874 /* User chose the level, don't set higher than this */
881 /* Terminator record */
882 if (~pvr_mask
& pvr
) {
887 /* Parsing finished */
888 trace_spapr_cas_pvr(cpu_
->cpu_version
, cpu_match
,
889 cpu_version
, pcc_
->pcr_mask
);
892 if (old_cpu_version
!= cpu_version
) {
895 .cpu
= POWERPC_CPU(cs
),
896 .cpu_version
= cpu_version
,
900 run_on_cpu(cs
, do_set_compat
, &s
);
903 error_report_err(s
.err
);
913 /* For the future use: here @ov_table points to the first option vector */
916 ov5
= cas_get_option_vector(5, ov_table
);
921 /* @list now points to OV 5 */
922 ov5_byte2
= ldub_phys(&address_space_memory
, ov5
+ 2);
923 if (ov5_byte2
& OV5_DRCONF_MEMORY
) {
924 memory_update
= true;
927 if (spapr_h_cas_compose_response(spapr
, args
[1], args
[2],
928 cpu_update
, memory_update
)) {
929 qemu_system_reset_request();
935 static spapr_hcall_fn papr_hypercall_table
[(MAX_HCALL_OPCODE
/ 4) + 1];
936 static spapr_hcall_fn kvmppc_hypercall_table
[KVMPPC_HCALL_MAX
- KVMPPC_HCALL_BASE
+ 1];
938 void spapr_register_hypercall(target_ulong opcode
, spapr_hcall_fn fn
)
940 spapr_hcall_fn
*slot
;
942 if (opcode
<= MAX_HCALL_OPCODE
) {
943 assert((opcode
& 0x3) == 0);
945 slot
= &papr_hypercall_table
[opcode
/ 4];
947 assert((opcode
>= KVMPPC_HCALL_BASE
) && (opcode
<= KVMPPC_HCALL_MAX
));
949 slot
= &kvmppc_hypercall_table
[opcode
- KVMPPC_HCALL_BASE
];
956 target_ulong
spapr_hypercall(PowerPCCPU
*cpu
, target_ulong opcode
,
959 sPAPRMachineState
*spapr
= SPAPR_MACHINE(qdev_get_machine());
961 if ((opcode
<= MAX_HCALL_OPCODE
)
962 && ((opcode
& 0x3) == 0)) {
963 spapr_hcall_fn fn
= papr_hypercall_table
[opcode
/ 4];
966 return fn(cpu
, spapr
, opcode
, args
);
968 } else if ((opcode
>= KVMPPC_HCALL_BASE
) &&
969 (opcode
<= KVMPPC_HCALL_MAX
)) {
970 spapr_hcall_fn fn
= kvmppc_hypercall_table
[opcode
- KVMPPC_HCALL_BASE
];
973 return fn(cpu
, spapr
, opcode
, args
);
977 qemu_log_mask(LOG_UNIMP
, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx
"\n",
982 static void hypercall_register_types(void)
985 spapr_register_hypercall(H_ENTER
, h_enter
);
986 spapr_register_hypercall(H_REMOVE
, h_remove
);
987 spapr_register_hypercall(H_PROTECT
, h_protect
);
988 spapr_register_hypercall(H_READ
, h_read
);
991 spapr_register_hypercall(H_BULK_REMOVE
, h_bulk_remove
);
994 spapr_register_hypercall(H_SET_DABR
, h_set_dabr
);
997 spapr_register_hypercall(H_REGISTER_VPA
, h_register_vpa
);
998 spapr_register_hypercall(H_CEDE
, h_cede
);
1000 /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
1001 * here between the "CI" and the "CACHE" variants, they will use whatever
1002 * mapping attributes qemu is using. When using KVM, the kernel will
1003 * enforce the attributes more strongly
1005 spapr_register_hypercall(H_LOGICAL_CI_LOAD
, h_logical_load
);
1006 spapr_register_hypercall(H_LOGICAL_CI_STORE
, h_logical_store
);
1007 spapr_register_hypercall(H_LOGICAL_CACHE_LOAD
, h_logical_load
);
1008 spapr_register_hypercall(H_LOGICAL_CACHE_STORE
, h_logical_store
);
1009 spapr_register_hypercall(H_LOGICAL_ICBI
, h_logical_icbi
);
1010 spapr_register_hypercall(H_LOGICAL_DCBF
, h_logical_dcbf
);
1011 spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP
, h_logical_memop
);
1013 /* qemu/KVM-PPC specific hcalls */
1014 spapr_register_hypercall(KVMPPC_H_RTAS
, h_rtas
);
1016 spapr_register_hypercall(H_SET_MODE
, h_set_mode
);
1018 /* ibm,client-architecture-support support */
1019 spapr_register_hypercall(KVMPPC_H_CAS
, h_client_architecture_support
);
1022 type_init(hypercall_register_types
)