#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/hw_accel.h"
#include "sysemu/sysemu.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "kvm_ppc.h"
#include "hw/ppc/spapr_ovec.h"
struct SPRSyncState {
    int spr;
    target_ulong value;
    target_ulong mask;
};

static void do_spr_sync(CPUState *cs, run_on_cpu_data arg)
{
    struct SPRSyncState *s = arg.host_ptr;
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    cpu_synchronize_state(cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}
static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .spr = spr,
        .value = value,
        .mask = mask
    };
    run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s));
}
static bool has_spr(PowerPCCPU *cpu, int spr)
{
    /* We can test whether the SPR is defined by checking for a valid name */
    return cpu->env.spr_cb[spr].name != NULL;
}
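/*
 * A hash page table index is valid iff its PTE group number fits under
 * htab_mask. Worked example (illustrative, not from the original
 * source): a 16MB HPT holds 2^24 / 128 = 0x20000 PTEGs, so htab_mask is
 * 0x1ffff and any pte_index up to 8 * 0x20000 - 1 passes the check
 * below, while 0x100000 is rejected.
 */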
static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
{
    /*
     * hash value/pteg group index is normalized by htab_mask
     */
    if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
        return false;
    }
    return true;
}
static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr)
{
    MachineState *machine = MACHINE(spapr);
    MemoryHotplugState *hpms = &spapr->hotplug_memory;

    if (addr < machine->ram_size) {
        return true;
    }
    if ((addr >= hpms->base)
        && ((addr - hpms->base) < memory_region_size(&hpms->mr))) {
        return true;
    }

    return false;
}
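/*
 * H_ENTER: insert an HPTE into the guest's hash page table. args[] on
 * entry holds the flags, the PTE index, and the two 64-bit halves of
 * the new entry; on success args[0] returns the slot actually used.
 * Unless H_EXACT is set, any free slot within the addressed PTE group
 * may be chosen.
 */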
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if (!(ppc_hash64_load_hpte0(cpu, token, index) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_stop_access(cpu, token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(cpu, token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(cpu, token);
    }

    ppc_hash64_store_hpte(cpu, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    CPUPPCState *env = &cpu->env;
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(cpu, ptex);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}
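/*
 * H_REMOVE: invalidate a single HPTE. The H_AVPN / H_ANDCOND flags make
 * the removal conditional on the entry's abbreviated VPN, so a guest
 * cannot accidentally tear down a slot that was recycled under it.
 * remove_hpte() above does the work and is shared with H_BULK_REMOVE.
 */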
static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}
#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define   H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define   H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4
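/*
 * H_BULK_REMOVE processes up to four (tsh, tsl) request doublewords per
 * call. The high byte of each tsh encodes the type, response code and
 * flags (see the masks above); the low bytes carry the PTE index.
 * Illustrative example (not from the original source): a single request
 * for pte_index 0x10 with AVPN checking would look roughly like
 *   args[0] = H_BULK_REMOVE_REQUEST | H_BULK_REMOVE_AVPN | 0x10;
 *   args[1] = avpn_value;
 *   args[2] = H_BULK_REMOVE_END;
 */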
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}
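/*
 * H_PROTECT: update the protection/key bits of an existing HPTE. The
 * sequence below mirrors what real hardware requires: the entry is
 * first stored back with the valid bit cleared, the TLB is flushed,
 * and only then is the updated entry made valid again.
 */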
static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    ppc_hash64_store_hpte(cpu, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}
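/*
 * H_READ: copy one HPTE (or, with H_READ_4, the four entries of an
 * aligned quad) out of the hash table into args[]. Each entry comes
 * back as its two 64-bit doublewords.
 */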
static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}
static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}
static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}
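/*
 * H_PAGE_INIT: zero and/or copy a page of guest memory, optionally
 * synchronizing the icache so the page may hold code. The dcbst/icbi
 * work is only needed under KVM; under TCG, flushing the translated
 * blocks is sufficient (see the tb_flush() fallback below).
 */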
static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, 1);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, 0);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len);          /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}
#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2
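/*
 * The VPA (Virtual Processor Area) is a shared-memory block through
 * which the hypervisor publishes per-vCPU state to the guest. Its
 * 16-bit length field lives at VPA_SIZE_OFFSET, and the byte at
 * VPA_SHARED_PROC_OFFSET advertises shared-processor mode.
 */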
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}
static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}
static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}
static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}
static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}
static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}
static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}
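/*
 * H_CEDE: the guest vCPU gives up the processor until the next
 * interrupt. Emulated by setting MSR[EE] and, if no work is pending,
 * halting the CPU thread.
 */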
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}
static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val  = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    target_ulong tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    /* If the ranges overlap with dst above src, copy backwards */
    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}
static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}
static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}
static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == AIL_RESERVED) {
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
    }

    return H_SUCCESS;
}
static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}
typedef struct {
    uint32_t cpu_version;
    Error *err;
} SetCompatState;

static void do_set_compat(CPUState *cs, run_on_cpu_data arg)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    SetCompatState *s = arg.host_ptr;

    cpu_synchronize_state(cs);
    ppc_set_compat(cpu, s->cpu_version, &s->err);
}
#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)
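/*
 * Map a logical PVR from the guest's ibm,client-architecture-support
 * list onto a numeric compatibility level (e.g. ISA 2.07 -> 2070) so
 * that candidate levels can be compared against each other and against
 * the user-configured maximum.
 */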
static void cas_handle_compat_cpu(PowerPCCPUClass *pcc, uint32_t pvr,
                                  unsigned max_lvl, unsigned *compat_lvl,
                                  unsigned *cpu_version)
{
    unsigned lvl = get_compat_level(pvr);
    bool is205, is206, is207;

    if (!lvl) {
        return;
    }

    /* If it is a logical PVR, try to determine the highest level */
    is205 = (pcc->pcr_supported & PCR_COMPAT_2_05) &&
            (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
    is206 = (pcc->pcr_supported & PCR_COMPAT_2_06) &&
            ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
             (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));
    is207 = (pcc->pcr_supported & PCR_COMPAT_2_07) &&
            (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_07));

    if (is205 || is206 || is207) {
        if (!max_lvl) {
            /* User did not set the level, choose the highest */
            if (*compat_lvl <= lvl) {
                *compat_lvl = lvl;
                *cpu_version = pvr;
            }
        } else if (max_lvl >= lvl) {
            /* User chose the level, don't set higher than this */
            *compat_lvl = lvl;
            *cpu_version = pvr;
        }
    }
}
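/*
 * KVMPPC_H_CAS: handle ibm,client-architecture-support. The guest
 * passes a list of (pvr_mask, pvr) records followed by option vectors;
 * QEMU picks a compatibility mode, renegotiates the ov5 capabilities,
 * and either patches the device tree in the CAS response or requests a
 * reboot if previously-negotiated capabilities were withdrawn.
 */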
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = ppc64_phys_to_real(args[0]);
    target_ulong ov_table;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false, cpu_update = true;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;
    sPAPROptionVector *ov5_guest, *ov5_cas_old, *ov5_updates;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, list);
        list += 4;
        pvr = ldl_be_phys(&address_space_memory, list);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            cas_handle_compat_cpu(pcc, pvr, max_lvl, &compat_lvl, &cpu_version);
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu_version = cpu_version,
                .err = NULL,
            };

            run_on_cpu(cs, do_set_compat, RUN_ON_CPU_HOST_PTR(&s));

            if (s.err) {
                error_report_err(s.err);
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        cpu_update = false;
    }

    /* For future use: here @ov_table points to the first option vector */
    ov_table = list;

    ov5_guest = spapr_ovec_parse_vector(ov_table, 5);

    /* NOTE: there are actually a number of ov5 bits where input from the
     * guest is always zero, and the platform/QEMU enables them independently
     * of guest input. To model these properly we'd want some sort of mask,
     * but since they only currently apply to memory migration as defined
     * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need
     * to worry about this for now.
     */

    /* full range of negotiated ov5 capabilities */
    ov5_cas_old = spapr_ovec_clone(spapr->ov5_cas);
    spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest);
    spapr_ovec_cleanup(ov5_guest);
    /* capabilities that have been added since CAS-generated guest reset.
     * if capabilities have since been removed, generate another reset
     */
    ov5_updates = spapr_ovec_new();
    spapr->cas_reboot = spapr_ovec_diff(ov5_updates,
                                        ov5_cas_old, spapr->ov5_cas);

    if (!spapr->cas_reboot) {
        spapr->cas_reboot =
            (spapr_h_cas_compose_response(spapr, args[1], args[2], cpu_update,
                                          ov5_updates) != 0);
    }
    spapr_ovec_cleanup(ov5_updates);

    if (spapr->cas_reboot) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
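/*
 * Dispatch tables: PAPR hcall numbers are multiples of 4, so the first
 * table is indexed by opcode / 4; the KVM/qemu-specific ("KVMPPC_")
 * hcalls live in a second, offset-indexed table.
 */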
void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}
static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)