/*
 *  PowerPC memory access emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */
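
/*
 * A load or store needs a byte swap when the endianness selected by
 * MSR[LE] differs from the byte order this target binary was built
 * for (see the Altivec indexing comment further down).
 */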
static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}
/*****************************************************************************/
/* Memory loads and stores */
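
/*
 * Advance an effective address by arg bytes, truncating the result to
 * 32 bits when the CPU is not currently in 64-bit mode.
 */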
static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    }
#endif
    return addr + arg;
}
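
/*
 * Probe a contiguous guest buffer of nb bytes for the given access.
 * Returns a host pointer if the whole range is directly addressable
 * RAM (possibly spanning two contiguously mapped pages), else NULL,
 * in which case the caller falls back to per-access MMU helpers.
 * Typical caller pattern (a sketch of the uses below):
 *
 *     host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, ra);
 *     if (likely(host)) {
 *         ... fast path: direct host memory ops ...
 *     } else {
 *         ... slow path: cpu_ld*_mmuidx_ra / cpu_st*_mmuidx_ra ...
 *     }
 */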
static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
                              MMUAccessType access_type, int mmu_idx,
                              uintptr_t raddr)
{
    void *host1, *host2;
    uint32_t nb_pg1, nb_pg2;

    nb_pg1 = -(addr | TARGET_PAGE_MASK);
    if (likely(nb <= nb_pg1)) {
        /* The entire operation is on a single page.  */
        return probe_access(env, addr, nb, access_type, mmu_idx, raddr);
    }

    /* The operation spans two pages.  */
    nb_pg2 = nb - nb_pg1;
    host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr);
    addr = addr_add(env, addr, nb_pg1);
    host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr);

    /* If the two host pages are contiguous, optimize.  */
    if (host2 == host1 + nb_pg1) {
        return host1;
    }
    return NULL;
}
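
/*
 * lmw: Load Multiple Word.  Fills GPRs reg..31 from successive words
 * starting at addr.  Illustrative guest usage (not from this file):
 *
 *     lmw r29, 0(r1)   # r29 <- [r1], r30 <- [r1+4], r31 <- [r1+8]
 */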
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC()));
        } else {
            env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}
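
/* stmw: Store Multiple Word, the store counterpart of lmw above. */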
void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    for (; reg < 32; reg++) {
        if (needs_byteswap(env)) {
            cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]),
                            GETPC());
        } else {
            cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC());
        }
        addr = addr_add(env, addr, 4);
    }
}
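
/*
 * Common code for lswi/lswx: load nb bytes into GPRs starting at reg,
 * big-endian, wrapping from r31 back to r0.  A final partial word is
 * left-justified in its target register, with the low bytes zeroed.
 */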
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            reg = (reg + 1) % 32;
            host += 4;
        }
        switch (nb) {
        case 1:
            val = ldub_p(host) << 24;
            break;
        case 2:
            val = lduw_be_p(host) << 16;
            break;
        case 3:
            val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
            break;
        default:
            return;
        }
        env->gpr[reg] = val;
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        switch (nb) {
        case 1:
            val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
            break;
        case 2:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            break;
        case 3:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            addr = addr_add(env, addr, 2);
            val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
            break;
        default:
            return;
        }
        env->gpr[reg] = val;
    }
}
void helper_lsw(CPUPPCState *env, target_ulong addr,
                uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}
/*
 * The PPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded.  On the other hand, IBM says
 * this is valid, but rA won't be loaded.  For now, I'll follow the
 * spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}
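
/*
 * stsw: Store String Word, the store counterpart of do_lsw.  It uses
 * the same probe_contiguous() fast path when the buffer is all RAM.
 */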
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            stl_be_p(host, env->gpr[reg]);
            reg = (reg + 1) % 32;
            host += 4;
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            stb_p(host, val >> 24);
            break;
        case 2:
            stw_be_p(host, val >> 16);
            break;
        case 3:
            stw_be_p(host, val >> 16);
            stb_p(host + 2, val >> 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; nb > 3; nb -= 4) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
            break;
        case 2:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            break;
        case 3:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            addr = addr_add(env, addr, 2);
            cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
            break;
        }
    }
}
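
/*
 * Common code for dcbz and dcbzep: zero one data cache line.  Zeroing
 * the line also kills any lwarx/ldarx reservation that covers it.  On
 * the 970, dcbz may zero only a 32-byte subline while dcbzl always
 * zeroes the full line, selected by an HID5 mode field.
 */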
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : env->dmmu_idx;

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == (addr & mask)) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
        }
    }
}
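
/* dcbzep is the external-PID variant: it stores via PPC_TLB_EPID_STORE. */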
void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}
void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line.  The PowerPC specification says this
     * is to be treated like a load (not a fetch) by the MMU.  To be
     * sure it will be so, do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}
void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
#endif
}
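
/*
 * lscbx: Load String And Compare Byte Indexed (legacy POWER).  Loads
 * up to the XER-specified byte count into GPRs starting at reg,
 * stopping early if a loaded byte equals the XER compare byte, and
 * returns the number of bytes actually loaded.
 */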
/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
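
/*
 * 128-bit quadword helpers for the parallel (MTTCG) case.  These are
 * only reached when the host provides true 128-bit atomic operations;
 * otherwise the translator raises EXCP_ATOMIC and the access is
 * re-executed with all other vCPUs stopped (hence the asserts below).
 */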
#ifdef TARGET_PPC64
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}
uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}
void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}
void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
#endif /* TARGET_PPC64 */
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use msr_le to determine index ordering in a vector.  However,
 * byteswapping is not simply controlled by msr_le.  We also need to
 * take into account endianness of the target.  This is done for the
 * little-endian PPC64 user-mode target.
 */
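
/*
 * LVE expands to one lvebx/lvehx/lvewx-style helper: it loads a single
 * element into the vector register slot selected by the low four bits
 * of the effective address, swapping bytes when needed.
 */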
#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX * (n_elems - 1);                    \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            index = n_elems - index - 1;                        \
        }                                                       \
                                                                \
        if (needs_byteswap(env)) {                              \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr, GETPC()));               \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr, GETPC());                     \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE
#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (msr_le) {                                                   \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]),       \
                   GETPC());                                            \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)], GETPC());   \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE
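
/*
 * VSX load/store with length: lxvl/stxvl take the byte count from the
 * top byte of RB, capped at 16.  The "left-justified" variants
 * (lxvll/stxvll, lj = 1) never reverse byte order in little-endian
 * mode.
 */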
#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)

#define VSX_LXVL(name, lj)                                              \
void helper_##name(CPUPPCState *env, target_ulong addr,                 \
                   ppc_vsr_t *xt, target_ulong rb)                      \
{                                                                       \
    ppc_vsr_t t;                                                        \
    uint64_t nb = GET_NB(rb);                                           \
    int i;                                                              \
                                                                        \
    t.s128 = int128_zero();                                             \
    if (nb) {                                                           \
        nb = (nb >= 16) ? 16 : nb;                                      \
        if (msr_le && !lj) {                                            \
            for (i = 16; i > 16 - nb; i--) {                            \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC());   \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        } else {                                                        \
            for (i = 0; i < nb; i++) {                                  \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());       \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        }                                                               \
    }                                                                   \
    *xt = t;                                                            \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL
#define VSX_STXVL(name, lj)                                       \
void helper_##name(CPUPPCState *env, target_ulong addr,           \
                   ppc_vsr_t *xt, target_ulong rb)                \
{                                                                 \
    target_ulong nb = GET_NB(rb);                                 \
    int i;                                                        \
                                                                  \
    if (!nb) {                                                    \
        return;                                                   \
    }                                                             \
                                                                  \
    nb = (nb >= 16) ? 16 : nb;                                    \
    if (msr_le && !lj) {                                          \
        for (i = 16; i > 16 - nb; i--) {                          \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    } else {                                                      \
        for (i = 0; i < nb; i++) {                                \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());     \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    }                                                             \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX
void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin.  The reason
     * given is "Nesting overflow".  The "persistent" bit is set,
     * providing a hint to the error handler to not retry.  The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction.  Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xB; /* 0b1011 = transaction failure */
}