/*
 *  PowerPC memory access emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
#include "helper_regs.h"
#include "exec/cpu_ldst.h"
#include "internal.h"
#include "qemu/atomic128.h"

/* #define DEBUG_OP */

static inline bool needs_byteswap(const CPUPPCState *env)
{
#if TARGET_BIG_ENDIAN
    return FIELD_EX64(env->msr, MSR, LE);
#else
    return !FIELD_EX64(env->msr, MSR, LE);
#endif
}

/*****************************************************************************/
/* Memory load and stores */

static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
                                    target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_is_64bit(env, env->msr)) {
        return (uint32_t)(addr + arg);
    } else
#endif
    {
        return addr + arg;
    }
}
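
/*
 * Probe a guest buffer of nb bytes that may cross a page boundary.
 * Returns a host pointer if the entire range is contiguous, directly
 * accessible host RAM (any exceptions required by the access have
 * already been raised by probe_access), or NULL if the caller must
 * fall back to the per-access MMU helpers.
 */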
static void *probe_contiguous(CPUPPCState *env, target_ulong addr, uint32_t nb,
                              MMUAccessType access_type, int mmu_idx,
                              uintptr_t raddr)
{
    void *host1, *host2;
    uint32_t nb_pg1, nb_pg2;

    nb_pg1 = -(addr | TARGET_PAGE_MASK);
    if (likely(nb <= nb_pg1)) {
        /* The entire operation is on a single page.  */
        return probe_access(env, addr, nb, access_type, mmu_idx, raddr);
    }

    /* The operation spans two pages.  */
    nb_pg2 = nb - nb_pg1;
    host1 = probe_access(env, addr, nb_pg1, access_type, mmu_idx, raddr);
    addr = addr_add(env, addr, nb_pg1);
    host2 = probe_access(env, addr, nb_pg2, access_type, mmu_idx, raddr);

    /* If the two host pages are contiguous, optimize.  */
    if (host2 == host1 + nb_pg1) {
        return host1;
    }
    return NULL;
}
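
/* lmw: load GPRs reg..31 from consecutive 32-bit words at addr.  */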
void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = ppc_env_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; reg < 32; reg++) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; reg < 32; reg++) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}
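
/* stmw: store GPRs reg..31 as consecutive 32-bit words at addr.  */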
void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx = ppc_env_mmu_index(env, false);
    void *host = probe_contiguous(env, addr, (32 - reg) * 4,
                                  MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; reg < 32; reg++) {
            stl_be_p(host, env->gpr[reg]);
            host += 4;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; reg < 32; reg++) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            addr = addr_add(env, addr, 4);
        }
    }
}
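
/*
 * Common code for lswi/lswx: load nb bytes into successive GPRs
 * starting at reg, wrapping from r31 to r0.  The bytes of a final
 * partial word are left-justified in the last register, with the
 * remaining low-order bytes zeroed.
 */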
static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                   uint32_t reg, uintptr_t raddr)
{
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = ppc_env_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_LOAD, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = (uint32_t)ldl_be_p(host);
            reg = (reg + 1) % 32;
            host += 4;
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = ldub_p(host) << 24;
            break;
        case 2:
            val = lduw_be_p(host) << 16;
            break;
        case 3:
            val = (lduw_be_p(host) << 16) | (ldub_p(host + 2) << 8);
            break;
        }
    } else {
        /* Slow path -- at least some of the operation requires i/o.  */
        for (; nb > 3; nb -= 4) {
            env->gpr[reg] = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        switch (nb) {
        default:
            return;
        case 1:
            val = cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 24;
            break;
        case 2:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            break;
        case 3:
            val = cpu_lduw_mmuidx_ra(env, addr, mmu_idx, raddr) << 16;
            addr = addr_add(env, addr, 2);
            val |= cpu_ldub_mmuidx_ra(env, addr, mmu_idx, raddr) << 8;
            break;
        }
    }
    env->gpr[reg] = val;
}

void helper_lsw(CPUPPCState *env, target_ulong addr,
                uint32_t nb, uint32_t reg)
{
    do_lsw(env, addr, nb, reg, GETPC());
}

/*
 * The PPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded.  On the other hand, IBM says
 * this is valid, but rA won't be loaded.  For now, I'll follow the
 * spec...
 */
void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                 uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        int num_used_regs = DIV_ROUND_UP(xer_bc, 4);
        if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
                     lsw_reg_in_range(reg, num_used_regs, rb))) {
            raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_LSWX, GETPC());
        } else {
            do_lsw(env, addr, xer_bc, reg, GETPC());
        }
    }
}
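
/*
 * stswi/stswx: store nb bytes from successive GPRs starting at reg,
 * wrapping from r31 to r0; only the high-order bytes of a final
 * partial word are stored.
 */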
void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
                 uint32_t reg)
{
    uintptr_t raddr = GETPC();
    int mmu_idx;
    void *host;
    uint32_t val;

    if (unlikely(nb == 0)) {
        return;
    }

    mmu_idx = ppc_env_mmu_index(env, false);
    host = probe_contiguous(env, addr, nb, MMU_DATA_STORE, mmu_idx, raddr);

    if (likely(host)) {
        /* Fast path -- the entire operation is in RAM at host.  */
        for (; nb > 3; nb -= 4) {
            stl_be_p(host, env->gpr[reg]);
            reg = (reg + 1) % 32;
            host += 4;
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            stb_p(host, val >> 24);
            break;
        case 2:
            stw_be_p(host, val >> 16);
            break;
        case 3:
            stw_be_p(host, val >> 16);
            stb_p(host + 2, val >> 8);
            break;
        }
    } else {
        for (; nb > 3; nb -= 4) {
            cpu_stl_mmuidx_ra(env, addr, env->gpr[reg], mmu_idx, raddr);
            reg = (reg + 1) % 32;
            addr = addr_add(env, addr, 4);
        }
        val = env->gpr[reg];
        switch (nb) {
        case 1:
            cpu_stb_mmuidx_ra(env, addr, val >> 24, mmu_idx, raddr);
            break;
        case 2:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            break;
        case 3:
            cpu_stw_mmuidx_ra(env, addr, val >> 16, mmu_idx, raddr);
            addr = addr_add(env, addr, 2);
            cpu_stb_mmuidx_ra(env, addr, val >> 8, mmu_idx, raddr);
            break;
        }
    }
}
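
/*
 * dcbz: zero a dcache-line-sized block of memory and invalidate any
 * matching lwarx/ldarx reservation.  The epid flag selects the
 * external-PID translation regime used by dcbzep.
 */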
static void dcbz_common(CPUPPCState *env, target_ulong addr,
                        uint32_t opcode, bool epid, uintptr_t retaddr)
{
    target_ulong mask, dcbz_size = env->dcache_line_size;
    uint32_t i;
    void *haddr;
    int mmu_idx = epid ? PPC_TLB_EPID_STORE : ppc_env_mmu_index(env, false);

#if defined(TARGET_PPC64)
    /* Check for dcbz vs dcbzl on 970 */
    if (env->excp_model == POWERPC_EXCP_970 &&
        !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
        dcbz_size = 32;
    }
#endif

    /* Align address */
    mask = ~(dcbz_size - 1);
    addr &= mask;

    /* Check reservation */
    if ((env->reserve_addr & mask) == addr) {
        env->reserve_addr = (target_ulong)-1ULL;
    }

    /* Try fast path translate */
    haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr);
    if (haddr) {
        memset(haddr, 0, dcbz_size);
    } else {
        /* Slow path */
        for (i = 0; i < dcbz_size; i += 8) {
            cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
        }
    }
}

void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, false, GETPC());
}

void helper_dcbzep(CPUPPCState *env, target_ulong addr, uint32_t opcode)
{
    dcbz_common(env, addr, opcode, true, GETPC());
}

void helper_icbi(CPUPPCState *env, target_ulong addr)
{
    addr &= ~(env->dcache_line_size - 1);
    /*
     * Invalidate one cache line:
     * The PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU.  To be sure it will be so,
     * do the load "by hand".
     */
    cpu_ldl_data_ra(env, addr, GETPC());
}

void helper_icbiep(CPUPPCState *env, target_ulong addr)
{
#if !defined(CONFIG_USER_ONLY)
    /* See comments above */
    addr &= ~(env->dcache_line_size - 1);
    cpu_ldl_mmuidx_ra(env, addr, PPC_TLB_EPID_LOAD, GETPC());
#endif
}
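
/*
 * lscbx (legacy POWER): load up to XER[bc] bytes into successive GPRs
 * (wrapping from r31 to r0), stopping after a loaded byte matches
 * XER[cmp]; the returned count is folded back into XER by the caller.
 */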
/* XXX: to be tested */
target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
                          uint32_t ra, uint32_t rb)
{
    int i, c, d;

    d = 24;
    for (i = 0; i < xer_bc; i++) {
        c = cpu_ldub_data_ra(env, addr, GETPC());
        addr = addr_add(env, addr, 1);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == xer_cmp)) {
            break;
        }
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    return i;
}

/*****************************************************************************/
/* Altivec extension helpers */
#if HOST_BIG_ENDIAN
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/*
 * We use MSR_LE to determine index ordering in a vector.  However,
 * byteswapping is not simply controlled by MSR_LE.  We also need to
 * take into account the endianness of the target.  This is done for
 * the little-endian PPC64 user-mode target.
 */
#define LVE(name, access, swap, element)                        \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,          \
                       target_ulong addr)                       \
    {                                                           \
        size_t n_elems = ARRAY_SIZE(r->element);                \
        int adjust = HI_IDX * (n_elems - 1);                    \
        int sh = sizeof(r->element[0]) >> 1;                    \
        int index = (addr & 0xf) >> sh;                         \
        if (FIELD_EX64(env->msr, MSR, LE)) {                    \
            index = n_elems - index - 1;                        \
        }                                                       \
                                                                \
        if (needs_byteswap(env)) {                              \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                swap(access(env, addr, GETPC()));               \
        } else {                                                \
            r->element[LO_IDX ? index : (adjust - index)] =     \
                access(env, addr, GETPC());                     \
        }                                                       \
    }
#define I(x) (x)
LVE(lvebx, cpu_ldub_data_ra, I, u8)
LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
#undef I
#undef LVE
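
/*
 * STVE mirrors LVE for stores: write one element of a vector register
 * to memory, using the same MSR_LE index adjustment and byteswap rules
 * as the loads above.
 */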
#define STVE(name, access, swap, element)                               \
    void helper_##name(CPUPPCState *env, ppc_avr_t *r,                  \
                       target_ulong addr)                               \
    {                                                                   \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
        int adjust = HI_IDX * (n_elems - 1);                            \
        int sh = sizeof(r->element[0]) >> 1;                            \
        int index = (addr & 0xf) >> sh;                                 \
        if (FIELD_EX64(env->msr, MSR, LE)) {                            \
            index = n_elems - index - 1;                                \
        }                                                               \
                                                                        \
        if (needs_byteswap(env)) {                                      \
            access(env, addr, swap(r->element[LO_IDX ? index :          \
                                              (adjust - index)]),       \
                   GETPC());                                            \
        } else {                                                        \
            access(env, addr, r->element[LO_IDX ? index :               \
                                         (adjust - index)], GETPC());   \
        }                                                               \
    }
#define I(x) (x)
STVE(stvebx, cpu_stb_data_ra, I, u8)
STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
#undef I
#undef STVE

#ifdef TARGET_PPC64
#define GET_NB(rb) ((rb >> 56) & 0xFF)
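
/*
 * lxvl/lxvll: load "VSX vector with length", where the byte count is
 * taken from bits 0:7 of RB and capped at 16.  lj selects lxvll, which
 * always left-justifies the bytes in the target VSR; lxvl instead
 * honors the current MSR_LE byte ordering.
 */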
#define VSX_LXVL(name, lj)                                              \
void helper_##name(CPUPPCState *env, target_ulong addr,                 \
                   ppc_vsr_t *xt, target_ulong rb)                      \
{                                                                       \
    ppc_vsr_t t;                                                        \
    uint64_t nb = GET_NB(rb);                                           \
    int i;                                                              \
                                                                        \
    t.s128 = int128_zero();                                             \
    if (nb) {                                                           \
        nb = (nb >= 16) ? 16 : nb;                                      \
        if (FIELD_EX64(env->msr, MSR, LE) && !lj) {                     \
            for (i = 16; i > 16 - nb; i--) {                            \
                t.VsrB(i - 1) = cpu_ldub_data_ra(env, addr, GETPC());   \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        } else {                                                        \
            for (i = 0; i < nb; i++) {                                  \
                t.VsrB(i) = cpu_ldub_data_ra(env, addr, GETPC());       \
                addr = addr_add(env, addr, 1);                          \
            }                                                           \
        }                                                               \
    }                                                                   \
    *xt = t;                                                            \
}

VSX_LXVL(lxvl, 0)
VSX_LXVL(lxvll, 1)
#undef VSX_LXVL
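
/* stxvl/stxvll: the store counterparts of lxvl/lxvll above.  */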
#define VSX_STXVL(name, lj)                                       \
void helper_##name(CPUPPCState *env, target_ulong addr,           \
                   ppc_vsr_t *xt, target_ulong rb)                \
{                                                                 \
    target_ulong nb = GET_NB(rb);                                 \
    int i;                                                        \
                                                                  \
    if (!nb) {                                                    \
        return;                                                   \
    }                                                             \
                                                                  \
    nb = (nb >= 16) ? 16 : nb;                                    \
    if (FIELD_EX64(env->msr, MSR, LE) && !lj) {                   \
        for (i = 16; i > 16 - nb; i--) {                          \
            cpu_stb_data_ra(env, addr, xt->VsrB(i - 1), GETPC()); \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    } else {                                                      \
        for (i = 0; i < nb; i++) {                                \
            cpu_stb_data_ra(env, addr, xt->VsrB(i), GETPC());     \
            addr = addr_add(env, addr, 1);                        \
        }                                                         \
    }                                                             \
}

VSX_STXVL(stxvl, 0)
VSX_STXVL(stxvll, 1)
#undef VSX_STXVL
#undef GET_NB
#endif /* TARGET_PPC64 */

#undef HI_IDX
#undef LO_IDX

void helper_tbegin(CPUPPCState *env)
{
    /*
     * As a degenerate implementation, always fail tbegin.  The reason
     * given is "Nesting overflow".  The "persistent" bit is set,
     * providing a hint to the error handler to not retry.  The TFIAR
     * captures the address of the failure, which is this tbegin
     * instruction.  Instruction execution will continue with the next
     * instruction in memory, which is precisely what we want.
     */

    env->spr[SPR_TEXASR] =
        (1ULL << TEXASR_FAILURE_PERSISTENT) |
        (1ULL << TEXASR_NESTING_OVERFLOW) |
        (FIELD_EX64_HV(env->msr) << TEXASR_PRIVILEGE_HV) |
        (FIELD_EX64(env->msr, MSR, PR) << TEXASR_PRIVILEGE_PR) |
        (1ULL << TEXASR_FAILURE_SUMMARY) |
        (1ULL << TEXASR_TFIAR_EXACT);
    env->spr[SPR_TFIAR] = env->nip | (FIELD_EX64_HV(env->msr) << 1) |
                          FIELD_EX64(env->msr, MSR, PR);
    env->spr[SPR_TFHAR] = env->nip + 4;
    env->crf[0] = 0xA; /* 0b1010 = transaction failure */
}