#include "sysemu/sysemu.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
struct SPRSyncState {
    CPUState *cs;
    int spr;
    target_ulong value;
    target_ulong mask;
};

static void do_spr_sync(void *arg)
{
    struct SPRSyncState *s = arg;
    PowerPCCPU *cpu = POWERPC_CPU(s->cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(s->cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}
static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .cs = cs,
        .spr = spr,
        .value = value,
        .mask = mask,
    };
    run_on_cpu(cs, do_spr_sync, &s);
}
static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
                                     target_ulong pte_index)
{
    target_ulong rb, va_low;

    rb = (v & ~0x7fULL) << 16; /* AVA field */
    va_low = pte_index >> 3;
    if (v & HPTE64_V_SECONDARY) {
        va_low = ~va_low;
    }
    /* xor vsid from AVA */
    if (!(v & HPTE64_V_1TB_SEG)) {
        va_low ^= v >> 12;
    } else {
        va_low ^= v >> 24;
    }
    va_low &= 0x7ff;
    if (v & HPTE64_V_LARGE) {
        rb |= 1;                         /* L field */
#if 0 /* Disable that P7 specific bit for now */
        if (r & 0xff000) {
            /* non-16MB large page, must be 64k */
            /* (masks depend on page size) */
            rb |= 0x1000;                /* page encoding in LP field */
            rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
            rb |= (va_low & 0xfe);       /* AVAL field */
        }
#endif
    } else {
        /* 4kB page */
        rb |= (va_low & 0x7ff) << 12;    /* remaining 11b of AVA */
    }
    rb |= (v >> 54) & 0x300;             /* B field */
    return rb;
}
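/*
 * Sketch of the tlbie RB image assembled above, as read from the field
 * comments in compute_tlbie_rb() (an inferred summary, not an
 * architecture quote): the AVA comes from HPTE dword 0 shifted into the
 * high part, the remaining low VA bits land at bit 12 for 4kB pages,
 * the B (segment size) field is taken from v >> 54, and bit 0 is the
 * L (large page) flag.
 */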
static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
{
    /*
     * hash value/pteg group index is normalized by htab_mask
     */
    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
        return false;
    }
    return true;
}
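/*
 * Example with assumed sizes: HPTES_PER_GROUP is 8, so an HPT with
 * 2^17 PTEGs has htab_mask == 0x1ffff.  A pte_index of 0x100000 maps
 * to PTEG index 0x20000, which has bits outside htab_mask, so
 * valid_pte_index() rejects it; any pte_index below 8 * 0x20000 passes.
 */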
static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    target_ulong page_shift = 12;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    /* only handle 4k and 16M pages for now */
    if (pteh & HPTE64_V_LARGE) {
#if 0 /* We don't support 64k pages yet */
        if ((ptel & 0xf000) == 0x1000) {
            /* 64k page */
        } else
#endif
        if ((ptel & 0xff000) == 0) {
            /* 16M page */
            page_shift = 24;
            /* lowest AVA bit must be 0 for 16M pages */
            if (pteh & 0x80) {
                return H_PARAMETER;
            }
        } else {
            return H_PARAMETER;
        }
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);

    if (raddr < spapr->ram_limit) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if ((ppc_hash64_load_hpte0(env, token, index) & HPTE64_V_VALID) == 0) {
                break;
            }
        }
        ppc_hash64_stop_access(token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(env, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(token);
    }

    ppc_hash64_store_hpte(env, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    uint64_t token;
    target_ulong v, r, rb;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(ppc_env_get_cpu(env), ptex);
    v = ppc_hash64_load_hpte0(env, token, 0);
    r = ppc_hash64_load_hpte1(env, token, 0);
    ppc_hash64_stop_access(token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(env, ptex, HPTE64_V_HPTE_DIRTY, 0);
    rb = compute_tlbie_rb(v, r, ptex);
    ppc_tlb_invalidate_one(env, rb);
    return REMOVE_SUCCESS;
}
static target_ulong h_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(env, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}
#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define   H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define   H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4
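/*
 * Each bulk-remove request occupies two consecutive args[] slots: a
 * "tsh" control/status word and a "tsl" AVPN value.  Per the masks
 * above, bits 63:62 of tsh hold the TYPE (request in, response out),
 * bits 61:60 a completion CODE, bits 59:58 the RC status, bits 57:56
 * the translation FLAGS (absolute/andcond/avpn), and the low bits the
 * PTE index.  h_bulk_remove() below rewrites each tsh in place as its
 * response.
 */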
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}
static target_ulong h_protect(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r, rb;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(env, token, 0);
    r = ppc_hash64_load_hpte1(env, token, 0);
    ppc_hash64_stop_access(token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    rb = compute_tlbie_rb(v, r, pte_index);
    ppc_hash64_store_hpte(env, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_tlb_invalidate_one(env, rb);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(env, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}
static target_ulong h_read(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
{
    /* FIXME: actually implement this */
    return H_HARDWARE;
}
#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2
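/*
 * Layout assumptions encoded by the offsets above: the VPA carries its
 * own size as a big-endian halfword at offset 0x4, and a
 * shared-processor indicator byte at offset 0x9 which register_vpa()
 * ORs with VPA_SHARED_PROC_VAL.  The VPA must be at least VPA_MIN_SIZE
 * bytes, cache-line aligned, and must not cross a 4k page boundary.
 */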
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}
static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}
static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}
static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}
static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}
static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;
    return H_SUCCESS;
}
static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}
static target_ulong h_cede(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    target_ulong tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}
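/*
 * Design note: when the destination overlaps the source from above,
 * h_logical_memop() walks the elements in descending order (starting
 * from the last element and stepping backwards) so no element is
 * overwritten before it has been read -- the same convention as
 * memmove().
 */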
static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}
static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    target_ulong prefix;

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ADDR_TRANS_NONE:
        prefix = 0;
        break;
    case H_SET_MODE_ADDR_TRANS_0001_8000:
        prefix = 0x18000;
        break;
    case H_SET_MODE_ADDR_TRANS_C000_0000_0000_4000:
        prefix = 0xC000000000004000ULL;
        break;
    default:
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        CPUPPCState *env = &POWERPC_CPU(cs)->env;

        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
        env->excp_prefix = prefix;
    }

    return H_SUCCESS;
}
static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}
typedef struct {
    PowerPCCPU *cpu;
    uint32_t cpu_version;
    int ret;
} SetCompatState;

static void do_set_compat(void *arg)
{
    SetCompatState *s = arg;

    cpu_synchronize_state(CPU(s->cpu));
    s->ret = ppc_set_compat(s->cpu, s->cpu_version);
}
#define get_compat_level(cpuver) (                           \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 :          \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 :          \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 :     \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)
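/*
 * get_compat_level() maps a logical PVR to an ordinal ISA level so
 * levels can be compared numerically, e.g.:
 *
 *   get_compat_level(CPU_POWERPC_LOGICAL_2_06)      -> 2060
 *   get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS) -> 2061
 *   get_compat_level(0x12345678)                    -> 0  (not logical)
 *
 * (0x12345678 is just an arbitrary example of a non-logical PVR.)
 */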
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPREnvironment *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = args[0];
    PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = rtas_ld(list, 0);
        list += 4;
        pvr = rtas_ld(list, 0);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!cpu_match &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            /* If it is a logical PVR, try to determine the highest level */
            unsigned lvl = get_compat_level(pvr);
            if (lvl) {
                bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
                             (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
                bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
                             ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
                              (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));

                if (is205 || is206) {
                    if (!max_lvl) {
                        /* User did not set the level, choose the highest */
                        if (compat_lvl <= lvl) {
                            compat_lvl = lvl;
                            cpu_version = pvr;
                        }
                    } else if (max_lvl >= lvl) {
                        /* User chose the level, don't set higher than this */
                        compat_lvl = lvl;
                        cpu_version = pvr;
                    }
                }
            }
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* For the future use: here @list points to the first capability */

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc_->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu = POWERPC_CPU(cs),
                .cpu_version = cpu_version,
                .ret = 0
            };

            run_on_cpu(cs, do_set_compat, &s);

            if (s.ret < 0) {
                fprintf(stderr, "Unable to set compatibility mode\n");
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        return H_SUCCESS;
    }

    if (spapr_h_cas_compose_response(args[1], args[2])) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}
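/*
 * Usage sketch (hypothetical handler, for illustration only): board code
 * registers a handler once at init, and spapr_hypercall() dispatches to
 * it when the guest issues that opcode:
 *
 *     static target_ulong h_example(PowerPCCPU *cpu, sPAPREnvironment *spapr,
 *                                   target_ulong opcode, target_ulong *args)
 *     {
 *         args[0] = 42;        -- output arguments go back in args[]
 *         return H_SUCCESS;    -- hcall status for the guest's r3
 *     }
 *
 *     spapr_register_hypercall(H_EXAMPLE, h_example);
 *
 * H_EXAMPLE stands in for a real 4-byte-aligned opcode no greater than
 * MAX_HCALL_OPCODE; registering the same slot twice trips the assert.
 */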
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    hcall_dprintf("Unimplemented hcall 0x" TARGET_FMT_lx "\n", opcode);
    return H_FUNCTION;
}
static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-dabr */
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly.
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)