/*
 *  PowerPC memory access emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg.h"
#include "internal.h"
#include "qemu/atomic128.h"
/* #define DEBUG_OP */

static inline bool needs_byteswap(const CPUPPCState *env)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return msr_le;
#else
    return !msr_le;
#endif
}
/*****************************************************************************/
/* Memory loads and stores */
static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}
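
/*
 * Note on addr_add(): in 32-bit mode the effective address wraps
 * modulo 2^32 (e.g. 0xfffffffc + 8 yields 0x00000004), while in
 * 64-bit mode the full-width sum is kept.  The multi-access helpers
 * below therefore step their EA through addr_add() between accesses.
 */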
static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
                              MMUAccessType access_type, int mmu_idx,
                              uintptr_t raddr)
{
    void *host1, *host2;
    uint32_t nb_pg1, nb_pg2;

    nb_pg1 = -(addr | TARGET_PAGE_MASK);
    if (likely(nb <= nb_pg1)) {
        /* The entire operation is on a single page.  */
        return probe_access(env, addr, nb, access_type, mmu_idx, raddr);
    }

    /* The operation spans two pages.  */
    nb_pg2 = nb - nb_pg1;
    host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr);
    addr = addr_add(env, addr, nb_pg1);
    host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr);

    /* If the two host pages are contiguous, optimize.  */
    if (host2 == host1 + nb_pg1) {
        return host1;
    }
    return NULL;
}
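
/*
 * lmw: load multiple word.  Loads GPRs rD through r31 from
 * (32 - rD) consecutive big-endian words starting at EA; e.g.
 * "lmw r29, 0(r1)" fills r29, r30 and r31 from 12 bytes at *r1.
 */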
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; reg < 32; reg++) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; reg < 32; reg++) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}
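
/* stmw: store multiple word -- the store counterpart of lmw above. */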
void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; reg < 32; reg++) {
            stl_be_p(host, env->gpr[reg]);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; reg < 32; reg++) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            reg = (reg + 1) % 32;
            host += 4;
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = ldub_p(host) << 24;
            break;
        case 2:
            val = lduw_be_p(host) << 16;
            break;
        case 3:
            val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
            break;
        case 2:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            break;
        case 3:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            addr = addr_add(env, addr, 2);
            val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
            break;
        }
    }
    env->gpr[reg] = val;
}
void helper_lsw(CPUPPCState *env, target_ulong addr,
                uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}
/*
 * PPC32 specification says we must generate an exception if rA is in
 * the range of registers to be loaded.  On the other hand, IBM says
 * this is valid, but rA won't be loaded.  For now, I'll follow the
 * spec...
 */
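/*
 * Illustration: with XER[bc] = 10, num_used_regs = DIV_ROUND_UP(10, 4)
 * = 3, so the transfer uses reg, reg + 1 and reg + 2 (wrapping modulo
 * 32); the check below faults if rA (when nonzero) or rB lies in that
 * window.
 */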
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = cpu_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            stl_be_p(host, env->gpr[reg]);
            reg = (reg + 1) % 32;
            host += 4;
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            stb_p(host, val >> 24);
            break;
        case 2:
            stw_be_p(host, val >> 16);
            break;
        case 3:
            stw_be_p(host, val >> 16);
            stb_p(host + 2, val >> 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; nb > 3; nb -= 4) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
            break;
        case 2:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            break;
        case 3:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            addr = addr_add(env, addr, 2);
            cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
            break;
        }
    }
}
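
/*
 * dcbz: data cache block zero.  Architecturally this zeroes one cache
 * line, i.e. env->dcache_line_size bytes here (or 32 bytes for plain
 * dcbz on a 970 with HID5[DCBZ] configured accordingly), using a
 * single host memset() when the whole line is in RAM.
 */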
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : env->dmmu_idx;

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
        }
    }
}
void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}
void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line:
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU.  To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}
void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
#endif
}
/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}
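
/*
 * Quadword (16-byte) helpers for lq/stq/stqcx. in a parallel TCG
 * context.  When the host lacks the required 128-bit atomics, the
 * translator raises EXCP_ATOMIC instead of calling these helpers,
 * hence the asserts below.
 */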
#ifdef TARGET_PPC64
uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}
uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr,
                               uint32_t opidx)
{
    Int128 ret;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC());
    env->retxh = int128_gethi(ret);
    return int128_getlo(ret);
}
void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC());
}
void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
                            uint64_t lo, uint64_t hi, uint32_t opidx)
{
    Int128 val;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_ATOMIC128);
    val = int128_make128(lo, hi);
    helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
}
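
/*
 * stqcx.: the 16-byte store succeeds only if the reservation
 * established by a prior load-and-reserve still covers this address;
 * the CR0 result returned is XER[SO] plus CRF_EQ_BIT on success, and
 * the reservation is always cleared.
 */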
uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
                                  uint64_t new_lo, uint64_t new_hi,
                                  uint32_t opidx)
{
    bool success = false;

    /* We will have raised EXCP_ATOMIC from the translator.  */
    assert(HAVE_CMPXCHG128);

    if (likely(addr == env->reserve_addr)) {
        Int128 oldv, cmpv, newv;

        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
        newv = int128_make128(new_lo, new_hi);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
                                             opidx, GETPC());
        success = int128_eq(oldv, cmpv);
    }
    env->reserve_addr = -1;
    return env->so + success * CRF_EQ_BIT;
}
#endif
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use msr_le to determine index ordering in a vector.  However,
 * byteswapping is not simply controlled by msr_le.  We also need to
 * take into account endianness of the target.  This is done for the
 * little-endian PPC64 user-mode target.
 */
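
/*
 * Worked example for the index arithmetic below: lvewx uses 4-byte
 * elements, so sh = 2 and index = (addr & 0xf) >> 2 selects one of
 * four u32 lanes; when msr_le is set the lane order is reversed.
 */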
#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX * (n_elems - 1);                    \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (msr_le) {                                           \
            index = n_elems - index - 1;                        \
        }                                                       \
                                                                \
        if (needs_byteswap(env)) {                              \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr, GETPC()));               \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr, GETPC());                     \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE
#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (msr_le) {                                                   \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]),       \
                   GETPC());                                            \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)], GETPC());   \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE
#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)
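
/*
 * lxvl/stxvl take the number of bytes to transfer from the
 * most-significant byte of rB (values above 16 are clamped to 16),
 * which is what GET_NB() extracts.
 */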
#define VSX_LXVL(name, lj)                                            \
void helper_##name(CPUPPCState *env, target_ulong addr,              \
                   ppc_vsr_t *xt, target_ulong rb)                    \
{                                                                     \
    ppc_vsr_t t;                                                      \
    uint64_t nb = GET_NB(rb);                                         \
    int i;                                                            \
                                                                      \
    t.s128 = int128_zero();                                           \
    if (nb) {                                                         \
        nb = (nb >= 16) ? 16 : nb;                                    \
        if (msr_le && !lj) {                                          \
            for (i = 16; i > 16 - nb; i--) {                          \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC()); \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        } else {                                                      \
            for (i = 0; i < nb; i++) {                                \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());     \
                addr = addr_add(env, addr, 1);                        \
            }                                                         \
        }                                                             \
    }                                                                 \
    *xt = t;                                                          \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL
#define VSX_STXVL(name, lj)                                       \
void helper_##name(CPUPPCState *env, target_ulong addr,          \
                   ppc_vsr_t *xt, target_ulong rb)                \
{                                                                 \
    target_ulong nb = GET_NB(rb);                                 \
    int i;                                                        \
                                                                  \
    if (!nb) {                                                    \
        return;                                                   \
    }                                                             \
                                                                  \
    nb = (nb >= 16) ? 16 : nb;                                    \
    if (msr_le && !lj) {                                          \
        for (i = 16; i > 16 - nb; i--) {                          \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    } else {                                                      \
        for (i = 0; i < nb; i++) {                                \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());     \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    }                                                             \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX
void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin.  The reason
     * given is "Nesting overflow".  The "persistent" bit is set,
     * providing a hint to the error handler to not retry.  The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction.  Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (msr_hv << TEXASR_PRIVILEGE_HV) |
        (msr_pr << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xB; /* 0b1011 = transaction failure */
}