/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390x-internal.h"
#include "tcg_s390x.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "trace.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
#include "hw/boards.h"
#endif

/*****************************************************************************/
/* Softmmu support */

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif

static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x80 >> psw_key);
    }
    return true;
}

static bool is_destructive_overlap(CPUS390XState *env, uint64_t dest,
                                   uint64_t src, uint32_t len)
{
    if (!len || src == dest) {
        return false;
    }
    /* Take care of wrapping at the end of address space. */
    if (unlikely(wrap_address(env, src + len - 1) < src)) {
        return dest > src || dest <= wrap_address(env, src + len - 1);
    }
    return dest > src && dest <= src + len - 1;
}

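/*
 * Worked example (illustration added for this document, not part of the
 * original source): for dest = 0x1001, src = 0x1000, len = 16, we have
 * dest > src && dest <= src + 15, so the ranges overlap destructively and
 * a plain memmove() over the whole range would read bytes that were already
 * overwritten.
 */
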
/* Trigger a SPECIFICATION exception if an address or a length is not
   naturally aligned.  */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
                                   int wordsize, uintptr_t ra)
{
    if (v % wordsize) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }
}

/* Load a value from memory according to its size. */
static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
                                           int wordsize, uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        return cpu_ldub_data_ra(env, addr, ra);
    case 2:
        return cpu_lduw_data_ra(env, addr, ra);
    default:
        abort();
    }
}

/* Store a value to memory according to its size. */
static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
                                      uint64_t value, int wordsize,
                                      uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        cpu_stb_data_ra(env, addr, value, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, value, ra);
        break;
    default:
        abort();
    }
}

/* An access covers at most 4096 bytes and therefore at most two pages. */
typedef struct S390Access {
    target_ulong vaddr1;
    target_ulong vaddr2;
    char *haddr1;
    char *haddr2;
    uint16_t size1;
    uint16_t size2;
    /*
     * If we can't access the host page directly, we'll have to do I/O access
     * via ld/st helpers. These are internal details, so we store the
     * mmu idx to do the access here instead of passing it around in the
     * helpers. Maybe, one day we can get rid of ld/st access - once we can
     * handle TLB_NOTDIRTY differently. We don't expect these special accesses
     * to trigger exceptions - only if we would have TLB_NOTDIRTY on LAP
     * pages, we might trigger a new MMU translation - very unlikely that
     * the mapping changes in between and we would trigger a fault.
     */
    int mmu_idx;
} S390Access;

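/*
 * Illustration (added, not part of the original source): with 4K pages, a
 * 300-byte access starting 100 bytes before a page boundary is described as
 * size1 = 100 bytes on the first page and size2 = 200 bytes on the second,
 * with haddr1/haddr2 holding the direct host mappings when available and
 * NULL when the ld/st-helper fallback must be used.
 */
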
/*
 * With nonfault=1, return the PGM_ exception that would have been injected
 * into the guest; return 0 if no exception was detected.
 *
 * For !CONFIG_USER_ONLY, the TEC is stored to env->tlb_fill_tec.
 * For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr.
 */
static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool nonfault, void **phost, uintptr_t ra)
{
#if defined(CONFIG_USER_ONLY)
    return probe_access_flags(env, addr, access_type, mmu_idx,
                              nonfault, phost, ra);
#else
    int flags;

    /*
     * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
     * to detect if there was an exception during tlb_fill().
     */
    env->tlb_fill_exc = 0;
    flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost,
                               ra);
    if (env->tlb_fill_exc) {
        return env->tlb_fill_exc;
    }

    if (unlikely(flags & TLB_WATCHPOINT)) {
        /* S390 does not presently use transaction attributes. */
        cpu_check_watchpoint(env_cpu(env), addr, size,
                             MEMTXATTRS_UNSPECIFIED,
                             (access_type == MMU_DATA_STORE
                              ? BP_MEM_WRITE : BP_MEM_READ), ra);
    }
    return 0;
#endif
}

static int access_prepare_nf(S390Access *access, CPUS390XState *env,
                             bool nonfault, vaddr vaddr1, int size,
                             MMUAccessType access_type,
                             int mmu_idx, uintptr_t ra)
{
    void *haddr1, *haddr2 = NULL;
    int size1, size2, exc;
    vaddr vaddr2 = 0;

    assert(size > 0 && size <= 4096);

    size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK));
    size2 = size - size1;

    exc = s390_probe_access(env, vaddr1, size1, access_type, mmu_idx, nonfault,
                            &haddr1, ra);
    if (exc) {
        return exc;
    }
    if (unlikely(size2)) {
        /* The access crosses page boundaries. */
        vaddr2 = wrap_address(env, vaddr1 + size1);
        exc = s390_probe_access(env, vaddr2, size2, access_type, mmu_idx,
                                nonfault, &haddr2, ra);
        if (exc) {
            return exc;
        }
    }

    *access = (S390Access) {
        .vaddr1 = vaddr1,
        .vaddr2 = vaddr2,
        .haddr1 = haddr1,
        .haddr2 = haddr2,
        .size1 = size1,
        .size2 = size2,
        .mmu_idx = mmu_idx
    };
    return 0;
}

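/*
 * Note on size1 above (added illustration, not in the original source):
 * -(vaddr1 | TARGET_PAGE_MASK) is the number of bytes from vaddr1 to the end
 * of its page. E.g. with 4K pages and vaddr1 = 0x1f00, the expression yields
 * 0x100, so a 0x200-byte access is split into size1 = 0x100 and
 * size2 = 0x100.
 */
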
static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
                                 MMUAccessType access_type, int mmu_idx,
                                 uintptr_t ra)
{
    S390Access ret;
    int exc = access_prepare_nf(&ret, env, false, vaddr, size,
                                access_type, mmu_idx, ra);

    assert(!exc);
    return ret;
}

/* Helper to handle memset on a single page. */
static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr,
                             uint8_t byte, uint16_t size, int mmu_idx,
                             uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    memset(haddr, byte, size);
#else
    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    int i;

    if (likely(haddr)) {
        memset(haddr, byte, size);
    } else {
        /*
         * Do a single access and test if we can then get access to the
         * page. This is especially relevant to speed up TLB_NOTDIRTY.
         */
        g_assert(size > 0);
        cpu_stb_mmu(env, vaddr, byte, oi, ra);
        haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
        if (likely(haddr)) {
            memset(haddr + 1, byte, size - 1);
        } else {
            for (i = 1; i < size; i++) {
                cpu_stb_mmu(env, vaddr + i, byte, oi, ra);
            }
        }
    }
#endif
}

static void access_memset(CPUS390XState *env, S390Access *desta,
                          uint8_t byte, uintptr_t ra)
{
    do_access_memset(env, desta->vaddr1, desta->haddr1, byte, desta->size1,
                     desta->mmu_idx, ra);
    if (likely(!desta->size2)) {
        return;
    }
    do_access_memset(env, desta->vaddr2, desta->haddr2, byte, desta->size2,
                     desta->mmu_idx, ra);
}

static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
                                  int offset, int mmu_idx, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    return ldub_p(*haddr + offset);
#else
    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
    uint8_t byte;

    if (likely(*haddr)) {
        return ldub_p(*haddr + offset);
    }
    /*
     * Do a single access and test if we can then get access to the
     * page. This is especially relevant to speed up TLB_NOTDIRTY.
     */
    byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra);
    *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx);
    return byte;
#endif
}

static uint8_t access_get_byte(CPUS390XState *env, S390Access *access,
                               int offset, uintptr_t ra)
{
    if (offset < access->size1) {
        return do_access_get_byte(env, access->vaddr1, &access->haddr1,
                                  offset, access->mmu_idx, ra);
    }
    return do_access_get_byte(env, access->vaddr2, &access->haddr2,
                              offset - access->size1, access->mmu_idx, ra);
}

static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr,
                               int offset, uint8_t byte, int mmu_idx,
                               uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    stb_p(*haddr + offset, byte);
#else
    MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

    if (likely(*haddr)) {
        stb_p(*haddr + offset, byte);
        return;
    }
    /*
     * Do a single access and test if we can then get access to the
     * page. This is especially relevant to speed up TLB_NOTDIRTY.
     */
    cpu_stb_mmu(env, vaddr + offset, byte, oi, ra);
    *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);
#endif
}

static void access_set_byte(CPUS390XState *env, S390Access *access,
                            int offset, uint8_t byte, uintptr_t ra)
{
    if (offset < access->size1) {
        do_access_set_byte(env, access->vaddr1, &access->haddr1, offset, byte,
                           access->mmu_idx, ra);
    } else {
        do_access_set_byte(env, access->vaddr2, &access->haddr2,
                           offset - access->size1, byte, access->mmu_idx, ra);
    }
}

/*
 * Move data with the same semantics as memmove() in case ranges don't overlap
 * or src > dest. Undefined behavior on destructive overlaps.
 */
static void access_memmove(CPUS390XState *env, S390Access *desta,
                           S390Access *srca, uintptr_t ra)
{
    int diff;

    g_assert(desta->size1 + desta->size2 == srca->size1 + srca->size2);

    /* Fallback to slow access in case we don't have access to all host pages */
    if (unlikely(!desta->haddr1 || (desta->size2 && !desta->haddr2) ||
                 !srca->haddr1 || (srca->size2 && !srca->haddr2))) {
        int i;

        for (i = 0; i < desta->size1 + desta->size2; i++) {
            uint8_t byte = access_get_byte(env, srca, i, ra);

            access_set_byte(env, desta, i, byte, ra);
        }
        return;
    }

    if (srca->size1 == desta->size1) {
        memmove(desta->haddr1, srca->haddr1, srca->size1);
        if (unlikely(srca->size2)) {
            memmove(desta->haddr2, srca->haddr2, srca->size2);
        }
    } else if (srca->size1 < desta->size1) {
        diff = desta->size1 - srca->size1;
        memmove(desta->haddr1, srca->haddr1, srca->size1);
        memmove(desta->haddr1 + srca->size1, srca->haddr2, diff);
        if (likely(desta->size2)) {
            memmove(desta->haddr2, srca->haddr2 + diff, desta->size2);
        }
    } else {
        diff = srca->size1 - desta->size1;
        memmove(desta->haddr1, srca->haddr1, desta->size1);
        memmove(desta->haddr2, srca->haddr1 + desta->size1, diff);
        if (likely(srca->size2)) {
            memmove(desta->haddr2 + diff, srca->haddr2, srca->size2);
        }
    }
}

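/*
 * Example of the middle case above (added illustration): for a 40-byte move
 * with srca->size1 = 10 and desta->size1 = 30, diff = 20; the 10 bytes of
 * the source's first page plus the first 20 bytes of its second page fill
 * the destination's first page, and the remaining 10 bytes land in the
 * destination's second page.
 */
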
static int mmu_idx_from_as(uint8_t as)
{
    switch (as) {
    case AS_PRIMARY:
        return MMU_PRIMARY_IDX;
    case AS_SECONDARY:
        return MMU_SECONDARY_IDX;
    case AS_HOME:
        return MMU_HOME_IDX;
    default:
        /* FIXME AS_ACCREG */
        g_assert_not_reached();
    }
}

static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* NC always processes one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) &
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_nc(env, l, dest, src, GETPC());
}

static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* XC always processes one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        access_memset(env, &desta, 0, ra);
        return 0;
    }

    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) ^
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_xc(env, l, dest, src, GETPC());
}

static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* OC always processes one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca1, i, ra) |
                          access_get_byte(env, &srca2, i, ra);

        c |= x;
        access_set_byte(env, &desta, i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_oc(env, l, dest, src, GETPC());
}

static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
                              uint64_t src, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca, desta;
    uint32_t i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* MVC always copies one more byte than specified - maximum is 256 */
    l++;

    srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);

    /*
     * "When the operands overlap, the result is obtained as if the operands
     * were processed one byte at a time". Only non-destructive overlaps
     * behave like memmove().
     */
    if (dest == src + 1) {
        access_memset(env, &desta, access_get_byte(env, &srca, 0, ra), ra);
    } else if (!is_destructive_overlap(env, dest, src, l)) {
        access_memmove(env, &desta, &srca, ra);
    } else {
        for (i = 0; i < l; i++) {
            uint8_t byte = access_get_byte(env, &srca, i, ra);

            access_set_byte(env, &desta, i, byte, ra);
        }
    }

    return env->cc_op;
}

void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    do_helper_mvc(env, l, dest, src, GETPC());
}

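/*
 * Added illustration: the dest == src + 1 special case implements the classic
 * MVC idiom for propagating a byte. MVC copies byte src to src + 1, then
 * (byte-at-a-time semantics) the just-written src + 1 to src + 2, and so on,
 * which is exactly a memset() with the first source byte.
 */
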
/* move inverse */
void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int i;

    /* MVCIN always copies one more byte than specified - maximum is 256 */
    l++;

    src = wrap_address(env, src - l + 1);
    srca = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = access_get_byte(env, &srca, l - i - 1, ra);

        access_set_byte(env, &desta, i, x, ra);
    }
}

/* move numerics */
void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uintptr_t ra = GETPC();
    int i;

    /* MVN always copies one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0x0f) |
                          (access_get_byte(env, &srca2, i, ra) & 0xf0);

        access_set_byte(env, &desta, i, x, ra);
    }
}

/* move with offset */
void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    /* MVO always processes one more byte than specified - maximum is 16 */
    const int len_dest = (l >> 4) + 1;
    const int len_src = (l & 0xf) + 1;
    uintptr_t ra = GETPC();
    uint8_t byte_dest, byte_src;
    S390Access srca, desta;
    int i, j;

    srca = access_prepare(env, src, len_src, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, len_dest, MMU_DATA_STORE, mmu_idx, ra);

    /* Handle rightmost byte */
    byte_dest = cpu_ldub_data_ra(env, dest + len_dest - 1, ra);
    byte_src = access_get_byte(env, &srca, len_src - 1, ra);
    byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
    access_set_byte(env, &desta, len_dest - 1, byte_dest, ra);

    /* Process remaining bytes from right to left */
    for (i = len_dest - 2, j = len_src - 2; i >= 0; i--, j--) {
        byte_dest = byte_src >> 4;
        if (j >= 0) {
            byte_src = access_get_byte(env, &srca, j, ra);
        } else {
            byte_src = 0;
        }
        byte_dest |= byte_src << 4;
        access_set_byte(env, &desta, i, byte_dest, ra);
    }
}

/* move zones */
void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    S390Access srca1, srca2, desta;
    uintptr_t ra = GETPC();
    int i;

    /* MVZ always copies one more byte than specified - maximum is 256 */
    l++;

    srca1 = access_prepare(env, src, l, MMU_DATA_LOAD, mmu_idx, ra);
    srca2 = access_prepare(env, dest, l, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, dest, l, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < l; i++) {
        const uint8_t x = (access_get_byte(env, &srca1, i, ra) & 0xf0) |
                          (access_get_byte(env, &srca2, i, ra) & 0x0f);

        access_set_byte(env, &desta, i, x, ra);
    }
}

/* compare unsigned byte arrays */
static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
                              uint64_t s2, uintptr_t ra)
{
    uint32_t i;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
        uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            break;
        } else if (x > y) {
            cc = 2;
            break;
        }
    }

    HELPER_LOG("\n");
    return cc;
}

uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    return do_helper_clc(env, l, s1, s2, GETPC());
}

/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);

    while (mask) {
        if (mask & 8) {
            uint8_t d = cpu_ldub_data_ra(env, addr, ra);
            uint8_t r = extract32(r1, 24, 8);
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }

    HELPER_LOG("\n");
    return cc;
}

static inline uint64_t get_address(CPUS390XState *env, int reg)
{
    return wrap_address(env, env->regs[reg]);
}

/**
 * Store the address to the given register, zeroing out unused leftmost
 * bits in bit positions 32-63 (24-bit and 31-bit mode only).
 */
static inline void set_address_zero(CPUS390XState *env, int reg,
                                    uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            address &= 0x00ffffff;
        } else {
            address &= 0x7fffffff;
        }
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
    }
}

static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            /* 24-Bit mode. According to the PoO it is implementation
               dependent if bits 32-39 remain unchanged or are set to
               zeros. Choose the former so that the function can also be
               used for TRT. */
            env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
        } else {
            /* 31-Bit mode. According to the PoO it is implementation
               dependent if bit 32 remains unchanged or is set to zero.
               Choose the latter so that the function can also be used for
               TRT. */
            address &= 0x7fffffff;
            env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
        }
    }
}

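/*
 * Added illustration: in 31-bit mode, set_address(env, r, 0x89abcdef) masks
 * off the leftmost bit of the low word and deposits 0x09abcdef into the low
 * 32 bits of the register (bits 32-63 in s390 numbering) while the high half
 * is left untouched.
 */
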
static inline uint64_t wrap_length32(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        return (uint32_t)length;
    }
    return length;
}

static inline uint64_t wrap_length31(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        /* 24-Bit and 31-Bit mode */
        length &= 0x7fffffff;
    }
    return length;
}

static inline uint64_t get_length(CPUS390XState *env, int reg)
{
    return wrap_length31(env, env->regs[reg]);
}

static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = length;
    } else {
        /* 24-Bit and 31-Bit mode */
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
    }
}

/* search string (c is byte to search, r2 is string, r1 end of string) */
void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t end, str;
    uint32_t len;
    uint8_t v, c = env->regs[0];

    /* Bits 32-55 must contain all 0. */
    if (env->regs[0] & 0xffffff00u) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found. R1 & R2 are unmodified. */
            env->cc_op = 2;
            return;
        }
        v = cpu_ldub_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found. Set R1 to the location; R2 is unmodified. */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed. Advance R2 to next byte to process. */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}

void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint32_t len;
    uint16_t v, c = env->regs[0];
    uint64_t end, str, adj_end;

    /* Bits 32-47 of R0 must be zero. */
    if (env->regs[0] & 0xffff0000u) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* If the LSB of the two addresses differ, use one extra byte. */
    adj_end = end + ((str ^ end) & 1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; len += 2) {
        if (str + len == adj_end) {
            /* End of input found. */
            env->cc_op = 2;
            return;
        }
        v = cpu_lduw_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found. Set R1 to the location; R2 is unmodified. */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed. Advance R2 to next byte to process. */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}

/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    s1 = wrap_address(env, s1);
    s2 = wrap_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
        uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);

        if (v1 == v2) {
            if (v1 == c) {
                /* Equal. CC=0, and don't advance the registers. */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal. CC={1,2}, and advance the registers. Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low". */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers. */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}

/* move page */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint32_t r1, uint32_t r2)
{
    const uint64_t src = get_address(env, r2) & TARGET_PAGE_MASK;
    const uint64_t dst = get_address(env, r1) & TARGET_PAGE_MASK;
    const int mmu_idx = cpu_mmu_index(env, false);
    const bool f = extract64(r0, 11, 1);
    const bool s = extract64(r0, 10, 1);
    const bool cco = extract64(r0, 8, 1);
    uintptr_t ra = GETPC();
    S390Access srca, desta;
    int exc;

    if ((f && s) || extract64(r0, 12, 4)) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
    }

    /*
     * We always manually handle exceptions such that we can properly store
     * r1/r2 to the lowcore on page-translation exceptions.
     *
     * TODO: Access key handling
     */
    exc = access_prepare_nf(&srca, env, true, src, TARGET_PAGE_SIZE,
                            MMU_DATA_LOAD, mmu_idx, ra);
    if (exc) {
        if (cco) {
            return 2;
        }
        goto inject_exc;
    }
    exc = access_prepare_nf(&desta, env, true, dst, TARGET_PAGE_SIZE,
                            MMU_DATA_STORE, mmu_idx, ra);
    if (exc) {
        if (cco && exc != PGM_PROTECTION) {
            return 1;
        }
        goto inject_exc;
    }
    access_memmove(env, &desta, &srca, ra);
    return 0; /* data moved */

inject_exc:
#if !defined(CONFIG_USER_ONLY)
    if (exc != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, trans_exc_code),
                 env->tlb_fill_tec);
    }
    if (exc == PGM_PAGE_TRANS) {
        stb_phys(env_cpu(env)->as, env->psa + offsetof(LowCore, op_access_id),
                 r1 << 4 | r2);
    }
#endif
    tcg_s390_program_interrupt(env, exc, ra);
}

/* string copy */
uint32_t HELPER(mvst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    const uint64_t d = get_address(env, r1);
    const uint64_t s = get_address(env, r2);
    const uint8_t c = env->regs[0];
    const int len = MIN(-(d | TARGET_PAGE_MASK), -(s | TARGET_PAGE_MASK));
    S390Access srca, desta;
    uintptr_t ra = GETPC();
    int i;

    if (env->regs[0] & 0xffffff00ull) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    /*
     * Our access should not exceed single pages, as we must not report access
     * exceptions exceeding the actually copied range (which we don't know at
     * this point). We might over-indicate watchpoints within the pages
     * (if we ever care, we have to limit processing to a single byte).
     */
    srca = access_prepare(env, s, len, MMU_DATA_LOAD, mmu_idx, ra);
    desta = access_prepare(env, d, len, MMU_DATA_STORE, mmu_idx, ra);
    for (i = 0; i < len; i++) {
        const uint8_t v = access_get_byte(env, &srca, i, ra);

        access_set_byte(env, &desta, i, v, ra);
        if (v == c) {
            set_address_zero(env, r1, d + i);
            return 1;
        }
    }
    set_address_zero(env, r1, d + len);
    set_address_zero(env, r2, s + len);
    return 3;
}

/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* move long helper */
static inline uint32_t do_mvcl(CPUS390XState *env,
                               uint64_t *dest, uint64_t *destlen,
                               uint64_t *src, uint64_t *srclen,
                               uint16_t pad, int wordsize, uintptr_t ra)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    int len = MIN(*destlen, -(*dest | TARGET_PAGE_MASK));
    S390Access srca, desta;
    int i, cc;

    if (*destlen == *srclen) {
        cc = 0;
    } else if (*destlen < *srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (!*destlen) {
        return cc;
    }

    /*
     * Only perform one type of operation (move/pad) at a time.
     * Stay within single pages.
     */
    if (*srclen) {
        /* Copy the src array */
        len = MIN(MIN(*srclen, -(*src | TARGET_PAGE_MASK)), len);
        *destlen -= len;
        *srclen -= len;
        srca = access_prepare(env, *src, len, MMU_DATA_LOAD, mmu_idx, ra);
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
        access_memmove(env, &desta, &srca, ra);
        *src = wrap_address(env, *src + len);
        *dest = wrap_address(env, *dest + len);
    } else if (wordsize == 1) {
        /* Pad the remaining area */
        *destlen -= len;
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);
        access_memset(env, &desta, pad, ra);
        *dest = wrap_address(env, *dest + len);
    } else {
        desta = access_prepare(env, *dest, len, MMU_DATA_STORE, mmu_idx, ra);

        /* The remaining length selects the padding byte. */
        for (i = 0; i < len; (*destlen)--, i++) {
            if (*destlen & 1) {
                access_set_byte(env, &desta, i, pad, ra);
            } else {
                access_set_byte(env, &desta, i, pad >> 8, ra);
            }
        }
        *dest = wrap_address(env, *dest + len);
    }

    return *destlen ? 3 : cc;
}

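/*
 * Added note: in the double-byte padding path above, "*destlen & 1" selects
 * which half of the 16-bit pad goes to the current byte - an odd remaining
 * length takes the low-order pad byte and an even one the high-order byte,
 * keeping the pad pattern aligned to the end of the operand.
 */
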
/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    const int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    CPUState *cs = env_cpu(env);
    S390Access srca, desta;
    uint32_t cc, cur_len;

    if (is_destructive_overlap(env, dest, src, MIN(srclen, destlen))) {
        cc = 3;
    } else if (srclen == destlen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    /* We might have to zero-out some bits even if there was no action. */
    if (unlikely(!destlen || cc == 3)) {
        set_address_zero(env, r2, src);
        set_address_zero(env, r1, dest);
        return cc;
    } else if (!srclen) {
        set_address_zero(env, r2, src);
    }

    /*
     * Only perform one type of operation (move/pad) in one step.
     * Stay within single pages.
     */
    while (destlen) {
        cur_len = MIN(destlen, -(dest | TARGET_PAGE_MASK));
        if (!srclen) {
            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
                                   ra);
            access_memset(env, &desta, pad, ra);
        } else {
            cur_len = MIN(MIN(srclen, -(src | TARGET_PAGE_MASK)), cur_len);

            srca = access_prepare(env, src, cur_len, MMU_DATA_LOAD, mmu_idx,
                                  ra);
            desta = access_prepare(env, dest, cur_len, MMU_DATA_STORE, mmu_idx,
                                   ra);
            access_memmove(env, &desta, &srca, ra);
            src = wrap_address(env, src + cur_len);
            srclen -= cur_len;
            env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
            set_address_zero(env, r2, src);
        }
        dest = wrap_address(env, dest + cur_len);
        destlen -= cur_len;
        env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
        set_address_zero(env, r1, dest);

        /*
         * MVCL is interruptible. Return to the main loop if requested after
         * writing back all state to registers. If no interrupt will get
         * injected, we'll end up back in this handler and continue processing
         * the remaining parts.
         */
        if (destlen && unlikely(cpu_loop_exit_requested(cs))) {
            cpu_loop_exit_restore(cs, ra);
        }
    }
    return cc;
}

/* move long extended */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* move long unicode */
uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* compare logical long helper */
static inline uint32_t do_clcl(CPUS390XState *env,
                               uint64_t *src1, uint64_t *src1len,
                               uint64_t *src3, uint64_t *src3len,
                               uint16_t pad, uint64_t limit,
                               int wordsize, uintptr_t ra)
{
    uint64_t len = MAX(*src1len, *src3len);
    uint32_t cc = 0;

    check_alignment(env, *src1len | *src3len, wordsize, ra);

    if (!len) {
        return cc;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. */
    if (len > limit) {
        len = limit;
        cc = 3;
    }

    for (; len; len -= wordsize) {
        uint16_t v1 = pad;
        uint16_t v3 = pad;

        if (*src1len) {
            v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
        }
        if (*src3len) {
            v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
        }

        if (v1 != v3) {
            cc = (v1 < v3) ? 1 : 2;
            break;
        }

        if (*src1len) {
            *src1 += wordsize;
            *src1len -= wordsize;
        }
        if (*src3len) {
            *src3 += wordsize;
            *src3len -= wordsize;
        }
    }

    return cc;
}

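/*
 * Added illustration: with src1len = 2, src3len = 4 and pad = 0x20, the two
 * missing operand-1 bytes compare as 0x20, so "AB" versus "AB  " (two
 * trailing spaces) compares equal and yields CC 0, matching the architected
 * CLCL padding rules.
 */
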
/* compare logical long */
uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
    uint64_t src3 = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
    set_address(env, r1, src1);
    set_address(env, r2, src3);

    return cc;
}

/* compare logical long extended memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* compare logical long unicode memcompare insn with padding */
uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uintptr_t ra = GETPC();
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available. */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data_ra(env, src, ra) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum. Note that we can see carry-out
       during folding more than once (but probably not more than twice). */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything. */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length. */
    env->retxl = cksm;
    return len;
}

)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
, uint64_t src
)
1388 uintptr_t ra
= GETPC();
1389 int len_dest
= len
>> 4;
1390 int len_src
= len
& 0xf;
1396 /* last byte is special, it only flips the nibbles */
1397 b
= cpu_ldub_data_ra(env
, src
, ra
);
1398 cpu_stb_data_ra(env
, dest
, (b
<< 4) | (b
>> 4), ra
);
1402 /* now pack every value */
1403 while (len_dest
> 0) {
1407 b
= cpu_ldub_data_ra(env
, src
, ra
) & 0x0f;
1412 b
|= cpu_ldub_data_ra(env
, src
, ra
) << 4;
1419 cpu_stb_data_ra(env
, dest
, b
, ra
);
static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
                           uint32_t srclen, int ssize, uintptr_t ra)
{
    int i;
    /* The destination operand is always 16 bytes long. */
    const int destlen = 16;

    /* The operands are processed from right to left. */
    src += srclen - 1;
    dest += destlen - 1;

    for (i = 0; i < destlen; i++) {
        uint8_t b = 0;

        if (i == 0) {
            /* Start with a positive sign */
            b = 0xc;
        } else if (srclen > ssize) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src -= ssize;
            srclen -= ssize;
        }

        if (srclen > ssize) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src -= ssize;
            srclen -= ssize;
        }

        cpu_stb_data_ra(env, dest, b, ra);
        dest--;
    }
}

void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 1, GETPC());
}

void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 2, GETPC());
}

void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */
    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data_ra(env, src, ra);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data_ra(env, dest, cur_byte, ra);
    }
}

static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
                                 uint32_t destlen, int dsize, uint64_t src,
                                 uintptr_t ra)
{
    int i;
    uint32_t cc;
    uint8_t b;
    /* The source operand is always 16 bytes long. */
    const int srclen = 16;

    /* The operands are processed from right to left. */
    src += srclen - 1;
    dest += destlen - dsize;

    /* Check for the sign. */
    b = cpu_ldub_data_ra(env, src, ra);
    src--;
    switch (b & 0xf) {
    case 0xa:
    case 0xc:
    case 0xe ... 0xf:
        cc = 0; /* plus */
        break;
    case 0xb:
    case 0xd:
        cc = 1; /* minus */
        break;
    default:
        cc = 3; /* invalid */
        break;
    }

    /* Now pad every nibble with 0x30, advancing one nibble at a time. */
    for (i = 0; i < destlen; i += dsize) {
        if (i == (31 * dsize)) {
            /* If length is 32/64 bytes, the leftmost byte is 0. */
            b = 0;
        } else if (i % (2 * dsize)) {
            b = cpu_ldub_data_ra(env, src, ra);
            src--;
        } else {
            b >>= 4;
        }
        cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
        dest -= dsize;
    }

    return cc;
}

uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 1, src, GETPC());
}

uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 2, src, GETPC());
}

uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;
    int i;

    for (i = 0; i < destlen; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
        /* digit */
        cc |= (b & 0xf0) > 0x90 ? 2 : 0;

        if (i == (destlen - 1)) {
            /* sign */
            cc |= (b & 0xf) < 0xa ? 1 : 0;
        } else {
            /* digit */
            cc |= (b & 0xf) > 0x9 ? 2 : 0;
        }
    }

    return cc;
}

*env
, uint32_t len
, uint64_t array
,
1604 uint64_t trans
, uintptr_t ra
)
1608 for (i
= 0; i
<= len
; i
++) {
1609 uint8_t byte
= cpu_ldub_data_ra(env
, array
+ i
, ra
);
1610 uint8_t new_byte
= cpu_ldub_data_ra(env
, trans
+ byte
, ra
);
1611 cpu_stb_data_ra(env
, array
+ i
, new_byte
, ra
);
1617 void HELPER(tr
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1620 do_helper_tr(env
, len
, array
, trans
, GETPC());
uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uintptr_t ra = GETPC();
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;
    uint32_t cc = 0;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do. For now, let's cap at 8k. */
    if (l > 0x2000) {
        l = 0x2000;
        cc = 3;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data_ra(env, array + i, ra);

        if (byte == end) {
            cc = 1;
            break;
        }

        new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    env->cc_op = cc;
    env->retxl = len - i;
    return array + i;
}

static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
                                     uint64_t array, uint64_t trans,
                                     int inc, uintptr_t ra)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
        uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);

        if (sbyte != 0) {
            set_address(env, 1, array + i * inc);
            env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
            return (i == len) ? 2 : 1;
        }
    }

    return 0;
}

static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len,
                                  uint64_t array, uint64_t trans,
                                  uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, 1, ra);
}

uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, 1, GETPC());
}

static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len,
                                   uint64_t array, uint64_t trans,
                                   uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, -1, ra);
}

uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
                      uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, -1, GETPC());
}

/* Translate one/two to one/two */
uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
                      uint32_t tst, uint32_t sizes)
{
    uintptr_t ra = GETPC();
    int dsize = (sizes & 1) ? 1 : 2;
    int ssize = (sizes & 2) ? 1 : 2;
    uint64_t tbl = get_address(env, 1);
    uint64_t dst = get_address(env, r1);
    uint64_t len = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint32_t cc = 3;
    int i;

    /* The lower address bits of TBL are ignored. For TROO, TROT, it's
       the low 3 bits (double-word aligned). For TRTO, TRTT, it's either
       the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */
    if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
        tbl &= -4096;
    } else {
        tbl &= -8;
    }

    check_alignment(env, len, ssize, ra);

    /* Lest we fail to service interrupts in a timely manner, */
    /* limit the amount of work we're willing to do. */
    for (i = 0; i < 0x2000; i++) {
        uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
        uint64_t tble = tbl + (sval * dsize);
        uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);

        if (dval == tst) {
            cc = 1;
            break;
        }
        cpu_stsize_data_ra(env, dst, dval, dsize, ra);

        len -= ssize;
        src += ssize;
        dst += dsize;

        if (len == 0) {
            cc = 0;
            break;
        }
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, len);
    set_address(env, r2, src);

    return cc;
}

void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
                  uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    Int128 oldv;
    uint64_t oldh, oldl;
    bool fail;

    check_alignment(env, addr, 16, ra);

    oldh = cpu_ldq_data_ra(env, addr + 0, ra);
    oldl = cpu_ldq_data_ra(env, addr + 8, ra);

    oldv = int128_make128(oldl, oldh);
    fail = !int128_eq(oldv, cmpv);
    if (fail) {
        newv = oldv;
    }

    cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
    cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}

void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
                           uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    int mem_idx;
    MemOpIdx oi;
    Int128 oldv;
    bool fail;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
    oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
    fail = !int128_eq(oldv, cmpv);

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}

static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
                        uint64_t a2, bool parallel)
{
    uint32_t mem_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    uint32_t fc = extract32(env->regs[0], 0, 8);
    uint32_t sc = extract32(env->regs[0], 8, 8);
    uint64_t pl = get_address(env, 1) & -16;
    uint64_t svh, svl;
    uint32_t cc;

    /* Sanity check the function code and storage characteristic. */
    if (fc > 1 || sc > 3) {
        if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
            goto spec_exception;
        }
        if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
            goto spec_exception;
        }
    }

    /* Sanity check the alignments. */
    if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) {
        goto spec_exception;
    }

    /* Sanity check writability of the store address. */
    probe_write(env, a2, 1 << sc, mem_idx, ra);

    /*
     * Note that the compare-and-swap is atomic, and the store is atomic,
     * but the complete operation is not. Therefore we do not need to
     * assert serial context in order to implement this. That said,
     * restart early if we can't support either operation that is supposed
     * to be atomic.
     */
    if (parallel) {
#ifdef CONFIG_ATOMIC64
        uint32_t max = 3;
#else
        uint32_t max = 2;
#endif
        if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
            (HAVE_ATOMIC128  ? 0 : sc > max)) {
            cpu_loop_exit_atomic(env_cpu(env), ra);
        }
    }

    /* All loads happen before all stores. For simplicity, load the entire
       store value area from the parameter list. */
    svh = cpu_ldq_data_ra(env, pl + 16, ra);
    svl = cpu_ldq_data_ra(env, pl + 24, ra);

    switch (fc) {
    case 0:
        {
            uint32_t nv = cpu_ldl_data_ra(env, pl, ra);
            uint32_t cv = env->regs[r3];
            uint32_t ov;

            if (parallel) {
#ifdef CONFIG_USER_ONLY
                uint32_t *haddr = g2h(env_cpu(env), a1);
                ov = qatomic_cmpxchg__nocheck(haddr, cv, nv);
#else
                MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
                ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#endif
            } else {
                ov = cpu_ldl_data_ra(env, a1, ra);
                cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
        }
        break;

    case 1:
        {
            uint64_t nv = cpu_ldq_data_ra(env, pl, ra);
            uint64_t cv = env->regs[r3];
            uint64_t ov;

            if (parallel) {
#ifdef CONFIG_ATOMIC64
                MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN, mem_idx);
                ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
#else
                /* Note that we asserted !parallel above. */
                g_assert_not_reached();
#endif
            } else {
                ov = cpu_ldq_data_ra(env, a1, ra);
                cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = ov;
        }
        break;

    case 2:
        {
            uint64_t nvh = cpu_ldq_data_ra(env, pl, ra);
            uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra);
            Int128 nv = int128_make128(nvl, nvh);
            Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
            Int128 ov;

            if (!parallel) {
                uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
                uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);

                ov = int128_make128(ol, oh);
                cc = !int128_eq(ov, cv);
                if (cc) {
                    nv = ov;
                }

                cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
                cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
            } else if (HAVE_CMPXCHG128) {
                MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx);
                ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
                cc = !int128_eq(ov, cv);
            } else {
                /* Note that we asserted !parallel above. */
                g_assert_not_reached();
            }

            env->regs[r3 + 0] = int128_gethi(ov);
            env->regs[r3 + 1] = int128_getlo(ov);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Store only if the comparison succeeded. Note that above we use a pair
       of 64-bit big-endian loads, so for sc < 3 we must extract the value
       from the most-significant bits of svh. */
    if (cc == 0) {
        switch (sc) {
        case 0:
            cpu_stb_data_ra(env, a2, svh >> 56, ra);
            break;
        case 1:
            cpu_stw_data_ra(env, a2, svh >> 48, ra);
            break;
        case 2:
            cpu_stl_data_ra(env, a2, svh >> 32, ra);
            break;
        case 3:
            cpu_stq_data_ra(env, a2, svh, ra);
            break;
        case 4:
            if (!parallel) {
                cpu_stq_data_ra(env, a2 + 0, svh, ra);
                cpu_stq_data_ra(env, a2 + 8, svl, ra);
            } else if (HAVE_ATOMIC128) {
                MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx);
                Int128 sv = int128_make128(svl, svh);
                cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra);
            } else {
                /* Note that we asserted !parallel above. */
                g_assert_not_reached();
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    return cc;

 spec_exception:
    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
}

uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
{
    return do_csst(env, r3, a1, a2, false);
}

uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
                               uint64_t a2)
{
    return do_csst(env, r3, a1, a2, true);
}

#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x7) {
        tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint64_t val = cpu_ldq_data_ra(env, src, ra);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, val);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    tlb_flush(env_cpu(env));
}

)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
2040 uintptr_t ra
= GETPC();
2041 bool PERchanged
= false;
2046 tcg_s390_program_interrupt(env
, PGM_SPECIFICATION
, ra
);
2049 for (i
= r1
;; i
= (i
+ 1) % 16) {
2050 uint32_t val
= cpu_ldl_data_ra(env
, src
, ra
);
2051 if ((uint32_t)env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
2054 env
->cregs
[i
] = deposit64(env
->cregs
[i
], 0, 32, val
);
2055 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%x\n", i
, src
, val
);
2056 src
+= sizeof(uint32_t);
2063 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
2064 s390_cpu_recompute_watchpoints(env_cpu(env
));
2067 tlb_flush(env_cpu(env
));
2070 void HELPER(stctg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
2072 uintptr_t ra
= GETPC();
2077 tcg_s390_program_interrupt(env
, PGM_SPECIFICATION
, ra
);
2080 for (i
= r1
;; i
= (i
+ 1) % 16) {
2081 cpu_stq_data_ra(env
, dest
, env
->cregs
[i
], ra
);
2082 dest
+= sizeof(uint64_t);
2090 void HELPER(stctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
2092 uintptr_t ra
= GETPC();
2097 tcg_s390_program_interrupt(env
, PGM_SPECIFICATION
, ra
);
2100 for (i
= r1
;; i
= (i
+ 1) % 16) {
2101 cpu_stl_data_ra(env
, dest
, env
->cregs
[i
], ra
);
2102 dest
+= sizeof(uint32_t);
2110 uint32_t HELPER(testblock
)(CPUS390XState
*env
, uint64_t real_addr
)
2112 uintptr_t ra
= GETPC();
2115 real_addr
= wrap_address(env
, real_addr
) & TARGET_PAGE_MASK
;
2117 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
+= 8) {
2118 cpu_stq_mmuidx_ra(env
, real_addr
+ i
, 0, MMU_REAL_IDX
, ra
);
2124 uint32_t HELPER(tprot
)(CPUS390XState
*env
, uint64_t a1
, uint64_t a2
)
2126 S390CPU
*cpu
= env_archcpu(env
);
2127 CPUState
*cs
= env_cpu(env
);
2130 * TODO: we currently don't handle all access protection types
2131 * (including access-list and key-controlled) as well as AR mode.
2133 if (!s390_cpu_virt_mem_check_write(cpu
, a1
, 0, 1)) {
2134 /* Fetching permitted; storing permitted */
2138 if (env
->int_pgm_code
== PGM_PROTECTION
) {
2139 /* retry if reading is possible */
2140 cs
->exception_index
= -1;
2141 if (!s390_cpu_virt_mem_check_read(cpu
, a1
, 0, 1)) {
2142 /* Fetching permitted; storing not permitted */
2147 switch (env
->int_pgm_code
) {
2148 case PGM_PROTECTION
:
2149 /* Fetching not permitted; storing not permitted */
2150 cs
->exception_index
= -1;
2152 case PGM_ADDRESSING
:
2153 case PGM_TRANS_SPEC
:
2154 /* exceptions forwarded to the guest */
2155 s390_cpu_virt_mem_handle_exc(cpu
, GETPC());
2159 /* Translation not available */
2160 cs
->exception_index
= -1;
2164 /* insert storage key extended */
2165 uint64_t HELPER(iske
)(CPUS390XState
*env
, uint64_t r2
)
2167 static S390SKeysState
*ss
;
2168 static S390SKeysClass
*skeyclass
;
2169 uint64_t addr
= wrap_address(env
, r2
);
2173 addr
= mmu_real2abs(env
, addr
);
2174 if (!mmu_absolute_addr_valid(addr
, false)) {
2175 tcg_s390_program_interrupt(env
, PGM_ADDRESSING
, GETPC());
2178 if (unlikely(!ss
)) {
2179 ss
= s390_get_skeys_device();
2180 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
2181 if (skeyclass
->enable_skeys
&& !skeyclass
->enable_skeys(ss
)) {
2182 tlb_flush_all_cpus_synced(env_cpu(env
));
2186 rc
= skeyclass
->get_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
);
2188 trace_get_skeys_nonzero(rc
);
2194 /* set storage key extended */
2195 void HELPER(sske
)(CPUS390XState
*env
, uint64_t r1
, uint64_t r2
)
2197 static S390SKeysState
*ss
;
2198 static S390SKeysClass
*skeyclass
;
2199 uint64_t addr
= wrap_address(env
, r2
);
2203 addr
= mmu_real2abs(env
, addr
);
2204 if (!mmu_absolute_addr_valid(addr
, false)) {
2205 tcg_s390_program_interrupt(env
, PGM_ADDRESSING
, GETPC());
2208 if (unlikely(!ss
)) {
2209 ss
= s390_get_skeys_device();
2210 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
2211 if (skeyclass
->enable_skeys
&& !skeyclass
->enable_skeys(ss
)) {
2212 tlb_flush_all_cpus_synced(env_cpu(env
));
2217 rc
= skeyclass
->set_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
);
2219 trace_set_skeys_nonzero(rc
);
2222 * As we can only flush by virtual address and not all the entries
2223 * that point to a physical address we have to flush the whole TLB.
2225 tlb_flush_all_cpus_synced(env_cpu(env
));
2228 /* reset reference bit extended */
2229 uint32_t HELPER(rrbe
)(CPUS390XState
*env
, uint64_t r2
)
2231 uint64_t addr
= wrap_address(env
, r2
);
2232 static S390SKeysState
*ss
;
2233 static S390SKeysClass
*skeyclass
;
2237 addr
= mmu_real2abs(env
, addr
);
2238 if (!mmu_absolute_addr_valid(addr
, false)) {
2239 tcg_s390_program_interrupt(env
, PGM_ADDRESSING
, GETPC());
2242 if (unlikely(!ss
)) {
2243 ss
= s390_get_skeys_device();
2244 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
2245 if (skeyclass
->enable_skeys
&& !skeyclass
->enable_skeys(ss
)) {
2246 tlb_flush_all_cpus_synced(env_cpu(env
));
2250 rc
= skeyclass
->get_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
);
2252 trace_get_skeys_nonzero(rc
);
2256 re
= key
& (SK_R
| SK_C
);
2259 rc
= skeyclass
->set_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
);
2261 trace_set_skeys_nonzero(rc
);
2265 * As we can only flush by virtual address and not all the entries
2266 * that point to a physical address we have to flush the whole TLB.
2268 tlb_flush_all_cpus_synced(env_cpu(env
));
2273 * 0 Reference bit zero; change bit zero
2274 * 1 Reference bit zero; change bit one
2275 * 2 Reference bit one; change bit zero
2276 * 3 Reference bit one; change bit one
2282 uint32_t HELPER(mvcs
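/*
 * Added note: re = key & (SK_R | SK_C) captures the reference and change
 * bits in adjacent bit positions, so "re >> 1" maps (R,C) = (1,0) to CC 2
 * and (1,1) to CC 3, matching the table above.
 */
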
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
2284 const uint8_t psw_as
= (env
->psw
.mask
& PSW_MASK_ASC
) >> PSW_SHIFT_ASC
;
2285 S390Access srca
, desta
;
2286 uintptr_t ra
= GETPC();
2289 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2290 __func__
, l
, a1
, a2
);
2292 if (!(env
->psw
.mask
& PSW_MASK_DAT
) || !(env
->cregs
[0] & CR0_SECONDARY
) ||
2293 psw_as
== AS_HOME
|| psw_as
== AS_ACCREG
) {
2294 s390_program_interrupt(env
, PGM_SPECIAL_OP
, ra
);
2297 l
= wrap_length32(env
, l
);
2306 /* TODO: Access key handling */
2307 srca
= access_prepare(env
, a2
, l
, MMU_DATA_LOAD
, MMU_PRIMARY_IDX
, ra
);
2308 desta
= access_prepare(env
, a1
, l
, MMU_DATA_STORE
, MMU_SECONDARY_IDX
, ra
);
2309 access_memmove(env
, &desta
, &srca
, ra
);
2313 uint32_t HELPER(mvcp
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
2315 const uint8_t psw_as
= (env
->psw
.mask
& PSW_MASK_ASC
) >> PSW_SHIFT_ASC
;
2316 S390Access srca
, desta
;
2317 uintptr_t ra
= GETPC();
2320 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2321 __func__
, l
, a1
, a2
);
2323 if (!(env
->psw
.mask
& PSW_MASK_DAT
) || !(env
->cregs
[0] & CR0_SECONDARY
) ||
2324 psw_as
== AS_HOME
|| psw_as
== AS_ACCREG
) {
2325 s390_program_interrupt(env
, PGM_SPECIAL_OP
, ra
);
2328 l
= wrap_length32(env
, l
);
2337 /* TODO: Access key handling */
2338 srca
= access_prepare(env
, a2
, l
, MMU_DATA_LOAD
, MMU_SECONDARY_IDX
, ra
);
2339 desta
= access_prepare(env
, a1
, l
, MMU_DATA_STORE
, MMU_PRIMARY_IDX
, ra
);
2340 access_memmove(env
, &desta
, &srca
, ra
);
2344 void HELPER(idte
)(CPUS390XState
*env
, uint64_t r1
, uint64_t r2
, uint32_t m4
)
2346 CPUState
*cs
= env_cpu(env
);
2347 const uintptr_t ra
= GETPC();
2348 uint64_t table
, entry
, raddr
;
2349 uint16_t entries
, i
, index
= 0;
2352 tcg_s390_program_interrupt(env
, PGM_SPECIFICATION
, ra
);
2355 if (!(r2
& 0x800)) {
2356 /* invalidation-and-clearing operation */
2357 table
= r1
& ASCE_ORIGIN
;
2358 entries
= (r2
& 0x7ff) + 1;
2360 switch (r1
& ASCE_TYPE_MASK
) {
2361 case ASCE_TYPE_REGION1
:
2362 index
= (r2
>> 53) & 0x7ff;
2364 case ASCE_TYPE_REGION2
:
2365 index
= (r2
>> 42) & 0x7ff;
2367 case ASCE_TYPE_REGION3
:
2368 index
= (r2
>> 31) & 0x7ff;
2370 case ASCE_TYPE_SEGMENT
:
2371 index
= (r2
>> 20) & 0x7ff;
2374 for (i
= 0; i
< entries
; i
++) {
2375 /* addresses are not wrapped in 24/31bit mode but table index is */
2376 raddr
= table
+ ((index
+ i
) & 0x7ff) * sizeof(entry
);
2377 entry
= cpu_ldq_mmuidx_ra(env
, raddr
, MMU_REAL_IDX
, ra
);
2378 if (!(entry
& REGION_ENTRY_I
)) {
2379 /* we are allowed to not store if already invalid */
2380 entry
|= REGION_ENTRY_I
;
2381 cpu_stq_mmuidx_ra(env
, raddr
, entry
, MMU_REAL_IDX
, ra
);
2386 /* We simply flush the complete tlb, therefore we can ignore r3. */
2390 tlb_flush_all_cpus_synced(cs
);
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
                  uint32_t m4)
{
    CPUState *cs = env_cpu(env);
    const uintptr_t ra = GETPC();
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte_addr, pte;

    /* Compute the page table entry address */
    pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
    pte_addr += VADDR_PAGE_TX(vaddr) * 8;

    /* Mark the page table entry as invalid */
    pte = cpu_ldq_mmuidx_ra(env, pte_addr, MMU_REAL_IDX, ra);
    pte |= PAGE_ENTRY_I;
    cpu_stq_mmuidx_ra(env, pte_addr, pte, MMU_REAL_IDX, ra);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    if (m4 & 1) {
        if (vaddr & ~VADDR_PAGE_TX_MASK) {
            tlb_flush_page(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush(cs);
        }
    } else {
        if (vaddr & ~VADDR_PAGE_TX_MASK) {
            tlb_flush_page_all_cpus_synced(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush_all_cpus_synced(cs);
        }
    }
}
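/*
 * Note on the flush logic above (explanatory, not from the PoO): m4 bit 0
 * is the local-clearing control, so the first branch only flushes this
 * CPU's TLB.  The "31-bit hack" also flushes the 0x80000000 alias, e.g.
 * invalidating vaddr 0x12345000 additionally drops a stale 0x92345000
 * entry that a 31-bit guest may have created for the same PTE.
 */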
/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    tlb_flush(env_cpu(env));
}

/* flush global tlb */
void HELPER(purge)(CPUS390XState *env)
{
    tlb_flush_all_cpus_synced(env_cpu(env));
}
/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret, tec;
    int flags, exc, cc;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, GETPC());
    }

    exc = mmu_translate(env, addr, MMU_S390_LRA, asc, &ret, &flags, &tec);
    if (exc) {
        cc = 3;
        ret = exc | 0x80000000;
    } else {
        cc = 0;
        ret |= addr & ~TARGET_PAGE_MASK;
    }

    env->cc_op = cc;
    return ret;
}
/* load pair from quadword */
uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;

    check_alignment(env, addr, 16, ra);
    hi = cpu_ldq_data_ra(env, addr + 0, ra);
    lo = cpu_ldq_data_ra(env, addr + 8, ra);

    env->retxl = lo;
    return hi;
}

uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;
    int mem_idx;
    MemOpIdx oi;
    Int128 v;

    assert(HAVE_ATOMIC128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx);
    v = cpu_atomic_ldo_be_mmu(env, addr, oi, ra);
    hi = int128_gethi(v);
    lo = int128_getlo(v);

    env->retxl = lo;
    return hi;
}
/* store pair to quadword */
void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
                  uint64_t low, uint64_t high)
{
    uintptr_t ra = GETPC();

    check_alignment(env, addr, 16, ra);
    cpu_stq_data_ra(env, addr + 0, high, ra);
    cpu_stq_data_ra(env, addr + 8, low, ra);
}

void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
                           uint64_t low, uint64_t high)
{
    uintptr_t ra = GETPC();
    int mem_idx;
    MemOpIdx oi;
    Int128 v;

    assert(HAVE_ATOMIC128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx);
    v = int128_make128(low, high);
    cpu_atomic_sto_be_mmu(env, addr, v, oi, ra);
}
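/*
 * Note on the *_parallel variants above (explanatory): MO_TEUQ | MO_ALIGN_16
 * builds a MemOpIdx for a big-endian 8-byte element with a 16-byte alignment
 * requirement, so the cpu_atomic_*_mmu() calls enforce quadword alignment
 * (raising the architectural exception on misalignment) instead of tearing
 * the 128-bit access.
 */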
/* Execute instruction.  This instruction executes an insn modified with
   the contents of r1.  It does not change the executed instruction in memory;
   it does not change the program counter.

   Perform this by recording the modified instruction in env->ex_value.
   This will be noticed by cpu_get_tb_cpu_state and thus tb translation.  */
void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
{
    uint64_t insn = cpu_lduw_code(env, addr);
    uint8_t opc = insn >> 8;

    /* Or in the contents of R1[56:63]. */
    insn |= r1;

    /* Load the rest of the instruction. */
    insn <<= 48;
    switch (get_ilen(opc)) {
    case 2:
        break;
    case 4:
        insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
        break;
    case 6:
        insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
        break;
    default:
        g_assert_not_reached();
    }

    /* The very most common cases can be sped up by avoiding a new TB. */
    if ((opc & 0xf0) == 0xd0) {
        typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
                                      uint64_t, uintptr_t);
        static const dx_helper dx[16] = {
            [0x0] = do_helper_trt_bkwd,
            [0x2] = do_helper_mvc,
            [0x4] = do_helper_nc,
            [0x5] = do_helper_clc,
            [0x6] = do_helper_oc,
            [0x7] = do_helper_xc,
            [0xc] = do_helper_tr,
            [0xd] = do_helper_trt_fwd,
        };
        dx_helper helper = dx[opc & 0xf];

        if (helper) {
            uint32_t l = extract64(insn, 48, 8);
            uint32_t b1 = extract64(insn, 44, 4);
            uint32_t d1 = extract64(insn, 32, 12);
            uint32_t b2 = extract64(insn, 28, 4);
            uint32_t d2 = extract64(insn, 16, 12);
            uint64_t a1 = wrap_address(env, (b1 ? env->regs[b1] : 0) + d1);
            uint64_t a2 = wrap_address(env, (b2 ? env->regs[b2] : 0) + d2);

            env->cc_op = helper(env, l, a1, a2, 0);
            env->psw.addr += ilen;
            return;
        }
    } else if (opc == 0x0a) {
        env->int_svc_code = extract64(insn, 48, 8);
        env->int_svc_ilen = ilen;
        helper_exception(env, EXCP_SVC);
        g_assert_not_reached();
    }

    /* Record the insn we want to execute as well as the ilen to use
       during the execution of the target insn.  This will also ensure
       that ex_value is non-zero, which flags that we are in a state
       that requires such execution.  */
    env->ex_value = insn | ilen;
}
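/*
 * Worked example of the ex_value packing above (illustrative): executing
 * the 4-byte target insn 0xa71a0001 via a 4-byte EX, with R1 contributing
 * zero, leaves insn = 0xa71a000100000000 after the shift-and-merge, so
 * env->ex_value = 0xa71a000100000004 - the target insn left-justified in
 * the high bits with the EXECUTE's ilen in the low bits for translation
 * to pick up.
 */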
uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
                       uint64_t len)
{
    const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    const uint64_t r0 = env->regs[0];
    const uintptr_t ra = GETPC();
    uint8_t dest_key, dest_as, dest_k, dest_a;
    uint8_t src_key, src_as, src_k, src_a;
    uint64_t val;
    int cc = 0;

    HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
               __func__, dest, src, len);

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }

    /* OAC (operand access control) for the first operand -> dest */
    val = (r0 & 0xffff0000ULL) >> 16;
    dest_key = (val >> 12) & 0xf;
    dest_as = (val >> 6) & 0x3;
    dest_k = (val >> 1) & 0x1;
    dest_a = val & 0x1;

    /* OAC (operand access control) for the second operand -> src */
    val = (r0 & 0x0000ffffULL);
    src_key = (val >> 12) & 0xf;
    src_as = (val >> 6) & 0x3;
    src_k = (val >> 1) & 0x1;
    src_a = val & 0x1;
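
    /*
     * Worked OAC example (illustrative): r0 = 0x00410041 places 0x41 in
     * both OAC fields, i.e. key 0, AS 1 (secondary), K bit 0, A bit 1 -
     * so both operands use the secondary space while the keys fall back
     * to the PSW key below.
     */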
    if (!dest_k) {
        dest_key = psw_key;
    }
    if (!src_k) {
        src_key = psw_key;
    }
    if (!dest_a) {
        dest_as = psw_as;
    }
    if (!src_a) {
        src_as = psw_as;
    }

    if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }
    if (!(env->cregs[0] & CR0_SECONDARY) &&
        (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
        tcg_s390_program_interrupt(env, PGM_SPECIAL_OP, ra);
    }
    if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
        tcg_s390_program_interrupt(env, PGM_PRIVILEGED, ra);
    }

    len = wrap_length32(env, len);
    if (len > 4096) {
        cc = 3;
        len = 4096;
    }

    /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
    if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
        (env->psw.mask & PSW_MASK_PSTATE)) {
        qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
                      __func__);
        tcg_s390_program_interrupt(env, PGM_ADDRESSING, ra);
    }

    /* FIXME: Access using correct keys and AR-mode */
    if (len) {
        S390Access srca = access_prepare(env, src, len, MMU_DATA_LOAD,
                                         mmu_idx_from_as(src_as), ra);
        S390Access desta = access_prepare(env, dest, len, MMU_DATA_STORE,
                                          mmu_idx_from_as(dest_as), ra);

        access_memmove(env, &desta, &srca, ra);
    }

    return cc;
}
/* Decode a Unicode character.  A return value < 0 indicates success, storing
   the UTF-32 result into OCHAR and the input length into OLEN.  A return
   value >= 0 indicates failure, and the CC value to be returned.  */
typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, bool enh_check, uintptr_t ra,
                                 uint32_t *ochar, uint32_t *olen);

/* Encode a Unicode character.  A return value < 0 indicates success, storing
   the bytes into ADDR and the output length into OLEN.  A return value >= 0
   indicates failure, and the CC value to be returned.  */
typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, uintptr_t ra, uint32_t c,
                                 uint32_t *olen);
static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       bool enh_check, uintptr_t ra,
                       uint32_t *ochar, uint32_t *olen)
{
    uint8_t s0, s1, s2, s3;
    uint32_t c, l;

    if (ilen < 1) {
        return 0;
    }
    s0 = cpu_ldub_data_ra(env, addr, ra);
    if (s0 <= 0x7f) {
        /* one byte character */
        l = 1;
        c = s0;
    } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
        /* invalid character */
        return 2;
    } else if (s0 <= 0xdf) {
        /* two byte character */
        l = 2;
        if (ilen < 2) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        c = s0 & 0x1f;
        c = (c << 6) | (s1 & 0x3f);
        if (enh_check && (s1 & 0xc0) != 0x80) {
            return 2;
        }
    } else if (s0 <= 0xef) {
        /* three byte character */
        l = 3;
        if (ilen < 3) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        c = s0 & 0x0f;
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        /* Fold the byte-by-byte range descriptions in the PoO into
           tests against the complete value.  It disallows encodings
           that could be smaller, and the UTF-16 surrogates. */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || c < 0x800
                || (c >= 0xd800 && c <= 0xdfff))) {
            return 2;
        }
    } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
        /* four byte character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        s3 = cpu_ldub_data_ra(env, addr + 3, ra);
        c = s0 & 0x07;
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        c = (c << 6) | (s3 & 0x3f);
        /* See above. */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || (s3 & 0xc0) != 0x80
                || c < 0x10000
                || c > 0x10ffff)) {
            return 2;
        }
    } else {
        /* invalid character */
        return 2;
    }

    *ochar = c;
    *olen = l;
    return -1;
}
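/*
 * Worked example for decode_utf8 (illustrative): the three-byte sequence
 * 0xe2 0x82 0xac (U+20AC, the euro sign) accumulates c = 0x02, then
 * (0x02 << 6) | 0x02 = 0x82, then (0x82 << 6) | 0x2c = 0x20ac, and passes
 * the enhanced check since 0x20ac >= 0x800 and is not a surrogate.
 */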
static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint16_t s0, s1;
    uint32_t c, l;

    if (ilen < 2) {
        return 0;
    }
    s0 = cpu_lduw_data_ra(env, addr, ra);
    if ((s0 & 0xfc00) != 0xd800) {
        /* one word character */
        l = 2;
        c = s0;
    } else {
        /* two word character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_lduw_data_ra(env, addr + 2, ra);
        c = extract32(s0, 6, 4) + 1;
        c = (c << 6) | (s0 & 0x3f);
        c = (c << 10) | (s1 & 0x3ff);
        if (enh_check && (s1 & 0xfc00) != 0xdc00) {
            /* invalid surrogate character */
            return 2;
        }
    }

    *ochar = c;
    *olen = l;
    return -1;
}
static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint32_t c;

    if (ilen < 4) {
        return 0;
    }
    c = cpu_ldl_data_ra(env, addr, ra);
    if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
        /* invalid unicode character */
        return 2;
    }

    *ochar = c;
    *olen = 4;
    return -1;
}
static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint8_t d[4];
    uint32_t l, i;

    if (c <= 0x7f) {
        /* one byte character */
        l = 1;
        d[0] = c;
    } else if (c <= 0x7ff) {
        /* two byte character */
        l = 2;
        d[1] = 0x80 | extract32(c, 0, 6);
        d[0] = 0xc0 | extract32(c, 6, 5);
    } else if (c <= 0xffff) {
        /* three byte character */
        l = 3;
        d[2] = 0x80 | extract32(c, 0, 6);
        d[1] = 0x80 | extract32(c, 6, 6);
        d[0] = 0xe0 | extract32(c, 12, 4);
    } else {
        /* four byte character */
        l = 4;
        d[3] = 0x80 | extract32(c, 0, 6);
        d[2] = 0x80 | extract32(c, 6, 6);
        d[1] = 0x80 | extract32(c, 12, 6);
        d[0] = 0xf0 | extract32(c, 18, 3);
    }

    if (ilen < l) {
        return 1;
    }
    for (i = 0; i < l; ++i) {
        cpu_stb_data_ra(env, addr + i, d[i], ra);
    }

    *olen = l;
    return -1;
}
static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint16_t d0, d1;

    if (c <= 0xffff) {
        /* one word character */
        if (ilen < 2) {
            return 1;
        }
        cpu_stw_data_ra(env, addr, c, ra);
        *olen = 2;
    } else {
        /* two word character */
        if (ilen < 4) {
            return 1;
        }
        d1 = 0xdc00 | extract32(c, 0, 10);
        d0 = 0xd800 | extract32(c, 10, 6);
        d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
        cpu_stw_data_ra(env, addr + 0, d0, ra);
        cpu_stw_data_ra(env, addr + 2, d1, ra);
        *olen = 4;
    }

    return -1;
}
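/*
 * Worked surrogate example for encode_utf16 (illustrative): c = 0x10437
 * gives d1 = 0xdc00 | 0x037 = 0xdc37 and d0 = 0xd800 | 0x01 = 0xd801
 * after depositing extract32(c, 16, 5) - 1 = 0, i.e. the standard pair
 * D801 DC37 for U+10437.
 */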
static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    if (ilen < 4) {
        return 1;
    }
    cpu_stl_data_ra(env, addr, c, ra);

    *olen = 4;
    return -1;
}
static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
                                       uint32_t r2, uint32_t m3, uintptr_t ra,
                                       decode_unicode_fn decode,
                                       encode_unicode_fn encode)
{
    uint64_t dst = get_address(env, r1);
    uint64_t dlen = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint64_t slen = get_length(env, r2 + 1);
    bool enh_check = m3 & 1;
    int i, cc = 3;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 256.  */
    for (i = 0; i < 256; ++i) {
        uint32_t c, ilen, olen;

        cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
        if (unlikely(cc >= 0)) {
            break;
        }
        cc = encode(env, dst, dlen, ra, c, &olen);
        if (unlikely(cc >= 0)) {
            break;
        }

        src += ilen;
        slen -= ilen;
        dst += olen;
        dlen -= olen;
        cc = 3;
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, dlen);
    set_address(env, r2, src);
    set_length(env, r2 + 1, slen);

    return cc;
}
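/*
 * Resulting cc values from the loop above (summary): -1 never escapes;
 * decode returning 0 (source exhausted) ends with cc 0, encode returning 1
 * (destination full) with cc 1, an invalid character with cc 2, and hitting
 * the 256-character cap leaves the cc 3 set at the end of each iteration,
 * telling the program to re-execute the instruction.
 */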
uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf16);
}

uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf32);
}

uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf8);
}

uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf32);
}

uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf8);
}

uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf16);
}
void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
                        uintptr_t ra)
{
    /* test the actual access, not just any access to the page due to LAP */
    while (len) {
        const uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
        const uint64_t curlen = MIN(pagelen, len);

        probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
        addr = wrap_address(env, addr + curlen);
        len -= curlen;
    }
}

void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len)
{
    probe_write_access(env, addr, len, GETPC());
}
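/*
 * Worked example of the page-split arithmetic above (illustrative, assuming
 * 4 KiB pages): -(addr | TARGET_PAGE_MASK) equals 0x1000 - (addr & 0xfff),
 * the distance to the next page boundary.  For addr = 0x...0ffe and len = 8,
 * the first probe covers 2 bytes and the second, on the following page,
 * covers the remaining 6.
 */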