/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/address-spaces.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
#endif
/*****************************************************************************/
/* Softmmu support */
#if !defined(CONFIG_USER_ONLY)

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int ret = s390_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
    if (unlikely(ret != 0)) {
        cpu_loop_exit_restore(cs, retaddr);
    }
}

#endif

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x80 >> psw_key);
    }
    return true;
}
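/* Worked example (illustrative, not from the original source): for
   psw_key == 2 the test above evaluates pkm & (0x80 >> 2), i.e.
   pkm & 0x20, so with pkm == 0x20 only access key 2 is permitted
   in the problem state; any other key fails the check. */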
/* Reduce the length so that addr + len doesn't cross a page boundary.  */
static inline uint32_t adj_len_to_page(uint32_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
        return -(addr | TARGET_PAGE_MASK);
    }
#endif
    return len;
}
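/* Worked example (illustrative, not from the original source): with 4K
   pages, addr = 0x1000f00 and len = 0x200 give (addr & ~TARGET_PAGE_MASK)
   + len - 1 = 0xf00 + 0x1ff = 0x10ff >= TARGET_PAGE_SIZE, so the length
   is clamped to -(addr | TARGET_PAGE_MASK) = 0x100, exactly the number
   of bytes left up to the page boundary. */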
/* Trigger a SPECIFICATION exception if an address or a length is not
   naturally aligned.  */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
                                   int wordsize, uintptr_t ra)
{
    if (v % wordsize) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }
}

/* Load a value from memory according to its size.  */
static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
                                           int wordsize, uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        return cpu_ldub_data_ra(env, addr, ra);
    case 2:
        return cpu_lduw_data_ra(env, addr, ra);
    default:
        abort();
    }
}

/* Store a value to memory according to its size.  */
static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
                                      uint64_t value, int wordsize,
                                      uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        cpu_stb_data_ra(env, addr, value, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, value, ra);
        break;
    default:
        abort();
    }
}
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted.  */
            uint32_t l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration.  */
            cpu_stb_data_ra(env, dest, byte, ra);
            dest++;
            l--;
        }
    }
}
#ifndef CONFIG_USER_ONLY
static void fast_memmove_idx(CPUS390XState *env, uint64_t dest, uint64_t src,
                             uint32_t len, int dest_idx, int src_idx,
                             uintptr_t ra)
{
    TCGMemOpIdx oi_dest = make_memop_idx(MO_UB, dest_idx);
    TCGMemOpIdx oi_src = make_memop_idx(MO_UB, src_idx);
    uint32_t len_adj;
    void *src_p;
    void *dest_p;
    uint8_t x;

    while (len > 0) {
        src = wrap_address(env, src);
        dest = wrap_address(env, dest);
        src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, src_idx);
        dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, dest_idx);

        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            len_adj = adj_len_to_page(adj_len_to_page(len, src), dest);
            memmove(dest_p, src_p, len_adj);
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            len_adj = 1;
            x = helper_ret_ldub_mmu(env, src, oi_src, ra);
            helper_ret_stb_mmu(env, dest, x, oi_dest, ra);
        }
        src += len_adj;
        dest += len_adj;
        len -= len_adj;
    }
}

static int mmu_idx_from_as(uint8_t as)
{
    switch (as) {
    case AS_PRIMARY:
        return MMU_PRIMARY_IDX;
    case AS_SECONDARY:
        return MMU_SECONDARY_IDX;
    case AS_HOME:
        return MMU_HOME_IDX;
    default:
        /* FIXME AS_ACCREG */
        g_assert_not_reached();
    }
}

static void fast_memmove_as(CPUS390XState *env, uint64_t dest, uint64_t src,
                            uint32_t len, uint8_t dest_as, uint8_t src_as,
                            uintptr_t ra)
{
    int src_idx = mmu_idx_from_as(src_as);
    int dest_idx = mmu_idx_from_as(dest_as);

    fast_memmove_idx(env, dest, src, len, dest_idx, src_idx, ra);
}
#endif

static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            uint32_t l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            cpu_stb_data_ra(env, dest, cpu_ldub_data_ra(env, src, ra), ra);
            src++;
            dest++;
            l--;
        }
    }
}
static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x &= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_nc(env, l, dest, src, GETPC());
}

static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        fast_memset(env, dest, 0, l + 1, ra);
        return 0;
    }

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x ^= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_xc(env, l, dest, src, GETPC());
}

static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x |= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_oc(env, l, dest, src, GETPC());
}
static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
                              uint64_t src, uintptr_t ra)
{
    uint32_t i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc and memmove do not behave the same when areas overlap! */
    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == src + 1) {
        fast_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l + 1, ra);
    } else if (dest < src || src + l < dest) {
        fast_memmove(env, dest, src, l + 1, ra);
    } else {
        /* slow version with byte accesses which always work */
        for (i = 0; i <= l; i++) {
            uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
            cpu_stb_data_ra(env, dest + i, x, ra);
        }
    }

    return env->cc_op;
}

void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    do_helper_mvc(env, l, dest, src, GETPC());
}
/* move inverse  */
void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, src - i, ra);
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}

/* move numerics  */
void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, dest + i, ra) & 0xf0;
        v |= cpu_ldub_data_ra(env, src + i, ra) & 0x0f;
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}
/* move with offset  */
void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = l >> 4;
    int len_src = l & 0xf;
    uint8_t byte_dest, byte_src;
    int i;

    src += len_src;
    dest += len_dest;

    /* Handle rightmost byte */
    byte_src = cpu_ldub_data_ra(env, src, ra);
    byte_dest = cpu_ldub_data_ra(env, dest, ra);
    byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
    cpu_stb_data_ra(env, dest, byte_dest, ra);

    /* Process remaining bytes from right to left */
    for (i = 1; i <= len_dest; i++) {
        byte_dest = byte_src >> 4;
        if (len_src - i >= 0) {
            byte_src = cpu_ldub_data_ra(env, src - i, ra);
        } else {
            byte_src = 0;
        }
        byte_dest |= byte_src << 4;
        cpu_stb_data_ra(env, dest - i, byte_dest, ra);
    }
}
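/* Worked example (illustrative, not from the original source): with a
   one-byte source 0x12 and a two-byte destination 0xAB 0xCD (l = 0x10),
   the rightmost step stores (0xCD & 0x0f) | (0x12 << 4) = 0x2D and the
   loop then stores 0x01, leaving 0x01 0x2D: the source digits shifted
   left one nibble with the destination's rightmost nibble preserved. */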
/* move zones  */
void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra) & 0x0f;
        b |= cpu_ldub_data_ra(env, src + i, ra) & 0xf0;
        cpu_stb_data_ra(env, dest + i, b, ra);
    }
}

/* compare unsigned byte arrays */
static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
                              uint64_t s2, uintptr_t ra)
{
    uint32_t i;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
        uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            goto done;
        } else if (x > y) {
            cc = 2;
            goto done;
        }
    }
    cc = 0;
 done:
    HELPER_LOG("\n");
    return cc;
}

uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    return do_helper_clc(env, l, s1, s2, GETPC());
}
/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);

    while (mask) {
        if (mask & 8) {
            uint8_t d = cpu_ldub_data_ra(env, addr, ra);
            uint8_t r = extract32(r1, 24, 8);
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }

    HELPER_LOG("\n");
    return cc;
}
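/* Illustrative note (not in the original source): a mask of 0xa (binary
   1010) compares only bytes 0 and 2 of R1 against two successive bytes
   at addr; each iteration shifts the mask and R1 left so the next
   selected register byte appears in bits 24-31. */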
static inline uint64_t get_address(CPUS390XState *env, int reg)
{
    return wrap_address(env, env->regs[reg]);
}

static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            /* 24-Bit mode. According to the PoO it is implementation
            dependent if bits 32-39 remain unchanged or are set to
            zeros.  Choose the former so that the function can also be
            used for TRT.  */
            env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
        } else {
            /* 31-Bit mode. According to the PoO it is implementation
            dependent if bit 32 remains unchanged or is set to zero.
            Choose the latter so that the function can also be used for
            TRT.  */
            address &= 0x7fffffff;
            env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
        }
    }
}

static inline uint64_t wrap_length(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        /* 24-Bit and 31-Bit mode */
        length &= 0x7fffffff;
    }
    return length;
}

static inline uint64_t get_length(CPUS390XState *env, int reg)
{
    return wrap_length(env, env->regs[reg]);
}

static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = length;
    } else {
        /* 24-Bit and 31-Bit mode */
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
    }
}
/* search string (c is byte to search, r2 is string, r1 end of string) */
void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t end, str;
    uint32_t len;
    uint8_t v, c = env->regs[0];

    /* Bits 32-55 must contain all 0.  */
    if (env->regs[0] & 0xffffff00u) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return;
        }
        v = cpu_ldub_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}
void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint32_t len;
    uint16_t v, c = env->regs[0];
    uint64_t end, str, adj_end;

    /* Bits 32-47 of R0 must be zero.  */
    if (env->regs[0] & 0xffff0000u) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* If the LSB of the two addresses differ, use one extra byte.  */
    adj_end = end + ((str ^ end) & 1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; len += 2) {
        if (str + len == adj_end) {
            /* End of input found.  */
            env->cc_op = 2;
            return;
        }
        v = cpu_lduw_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}
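/* Illustrative note (not in the original source): the (str ^ end) & 1
   adjustment covers the case where the two addresses differ in parity;
   e.g. str = 0x1001 and end = 0x2000 give adj_end = 0x2001, so the
   final, partially overlapping halfword is still examined. */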
/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    s1 = wrap_address(env, s1);
    s2 = wrap_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
        uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}

/* move page */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* ??? missing r0 handling, which includes access keys, but more
       importantly optional suppression of the exception!  */
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE, GETPC());
    return 0; /* data moved */
}

/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    d = wrap_address(env, d);
    s = wrap_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data_ra(env, s + len, ra);
        cpu_stb_data_ra(env, d + len, v, ra);
        if (v == c) {
            /* Complete.  Set CC=1 and advance R1.  */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}

/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        /* we either came here by lam or lamy, which have different lengths */
        s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
/* move long helper */
static inline uint32_t do_mvcl(CPUS390XState *env,
                               uint64_t *dest, uint64_t *destlen,
                               uint64_t *src, uint64_t *srclen,
                               uint16_t pad, int wordsize, uintptr_t ra)
{
    uint64_t len = MIN(*srclen, *destlen);
    uint32_t cc;

    if (*destlen == *srclen) {
        cc = 0;
    } else if (*destlen < *srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    /* Copy the src array */
    fast_memmove(env, *dest, *src, len, ra);
    *src += len;
    *srclen -= len;
    *dest += len;
    *destlen -= len;

    /* Pad the remaining area */
    if (wordsize == 1) {
        fast_memset(env, *dest, pad, *destlen, ra);
        *dest += *destlen;
        *destlen = 0;
    } else {
        /* If remaining length is odd, pad with odd byte first.  */
        if (*destlen & 1) {
            cpu_stb_data_ra(env, *dest, pad & 0xff, ra);
            *dest += 1;
            *destlen -= 1;
        }
        /* The remaining length is even, pad using words.  */
        for (; *destlen; *dest += 2, *destlen -= 2) {
            cpu_stw_data_ra(env, *dest, pad, ra);
        }
    }

    return cc;
}

/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
    set_address(env, r1, dest);
    set_address(env, r2, src);

    return cc;
}
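/* Illustrative note (not in the original source): for MVCL the even
   register of each pair holds the address and the odd one the length.
   E.g. with env->regs[r2 + 1] = 0x2a000005, the source length is 0x5
   (bits 0-23) and the padding byte is 0x2a (bits 24-31). */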
/* move long extended */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* move long unicode */
uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}
/* compare logical long helper */
static inline uint32_t do_clcl(CPUS390XState *env,
                               uint64_t *src1, uint64_t *src1len,
                               uint64_t *src3, uint64_t *src3len,
                               uint16_t pad, uint64_t limit,
                               int wordsize, uintptr_t ra)
{
    uint64_t len = MAX(*src1len, *src3len);
    uint32_t cc = 0;

    check_alignment(env, *src1len | *src3len, wordsize, ra);

    if (!len) {
        return cc;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  */
    if (len > limit) {
        len = limit;
        cc = 3;
    }

    for (; len; len -= wordsize) {
        uint16_t v1 = pad;
        uint16_t v3 = pad;

        if (*src1len) {
            v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
        }
        if (*src3len) {
            v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
        }

        if (v1 != v3) {
            cc = (v1 < v3) ? 1 : 2;
            break;
        }

        if (*src1len) {
            *src1 += wordsize;
            *src1len -= wordsize;
        }
        if (*src3len) {
            *src3 += wordsize;
            *src3len -= wordsize;
        }
    }

    return cc;
}

/* compare logical long */
uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
    uint64_t src3 = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
    set_address(env, r1, src1);
    set_address(env, r2, src3);

    return cc;
}

/* compare logical long extended memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* compare logical long unicode memcompare insn with padding */
uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uintptr_t ra = GETPC();
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
    }

    switch (max_len - len) {
    case 0:
        break;
    case 1:
        cksm += cpu_ldub_data_ra(env, src, ra) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
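/* Worked example (illustrative, not from the original source): if the
   running sum overflows to cksm = 0x123456789, one folding pass yields
   0x23456789 + 0x1 = 0x2345678a, the end-around-carry addition that the
   CKSM instruction defines. */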
void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;
    dest--;
    len_dest--;

    /* now pack every value */
    while (len_dest >= 0) {
        b = 0;

        if (len_src >= 0) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src--;
            len_src--;
        }
        if (len_src >= 0) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src--;
            len_src--;
        }

        cpu_stb_data_ra(env, dest, b, ra);
        dest--;
        len_dest--;
    }
}
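/* Worked example (illustrative, not from the original source): packing
   the zoned digits 0xF1 0xF2 0xF3 stores 0x3F for the flipped rightmost
   byte and 0x12 for the next one, i.e. 0x12 0x3F, the packed-decimal
   form of 123 with sign code F. */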
static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
                           uint32_t srclen, int ssize, uintptr_t ra)
{
    int i;
    /* The destination operand is always 16 bytes long.  */
    const int destlen = 16;

    /* The operands are processed from right to left.  */
    src += srclen - 1;
    dest += destlen - 1;

    for (i = 0; i < destlen; i++) {
        uint8_t b = 0;

        /* Start with a positive sign */
        if (i == 0) {
            b = 0xc;
        } else if (srclen > ssize) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src -= ssize;
            srclen -= ssize;
        }

        if (srclen > ssize) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src -= ssize;
            srclen -= ssize;
        }

        cpu_stb_data_ra(env, dest, b, ra);
        dest--;
    }
}

void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 1, GETPC());
}

void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 2, GETPC());
}

void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data_ra(env, src, ra);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);

        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data_ra(env, dest, cur_byte, ra);
    }
}

static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
                                 uint32_t destlen, int dsize, uint64_t src,
                                 uintptr_t ra)
{
    int i;
    uint32_t cc;
    uint8_t b;
    /* The source operand is always 16 bytes long.  */
    const int srclen = 16;

    /* The operands are processed from right to left.  */
    src += srclen - 1;
    dest += destlen - dsize;

    /* Check for the sign.  */
    b = cpu_ldub_data_ra(env, src, ra);
    src--;
    switch (b & 0xf) {
    case 0xa:
    case 0xc:
    case 0xe ... 0xf:
        cc = 0;  /* plus */
        break;
    case 0xb:
    case 0xd:
        cc = 1;  /* minus */
        break;
    default:
        cc = 3;  /* invalid */
        break;
    }

    /* Now pad every nibble with 0x30, advancing one nibble at a time. */
    for (i = 0; i < destlen; i += dsize) {
        if (i == (31 * dsize)) {
            /* If length is 32/64 bytes, the leftmost byte is 0. */
            b = 0;
        } else if (i % (2 * dsize)) {
            b = cpu_ldub_data_ra(env, src, ra);
            src--;
        } else {
            b >>= 4;
        }
        cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
        dest -= dsize;
    }

    return cc;
}

uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 1, src, GETPC());
}

uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 2, src, GETPC());
}
uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;
    int i;

    for (i = 0; i < destlen; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
        /* digit */
        cc |= (b & 0xf0) > 0x90 ? 2 : 0;

        if (i == (destlen - 1)) {
            /* sign */
            cc |= (b & 0xf) < 0xa ? 1 : 0;
        } else {
            /* digit */
            cc |= (b & 0xf) > 0x9 ? 2 : 0;
        }
    }

    return cc;
}
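/* Worked example (illustrative, not from the original source): the packed
   field 0x12 0x3c has digit nibbles 1, 2, 3 and sign nibble 0xc, so no cc
   bits are set and TP returns 0; a trailing nibble of 0x9 instead of 0xc
   would set cc bit 1 (invalid sign). */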
static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
                             uint64_t trans, uintptr_t ra)
{
    uint32_t i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
        uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    return env->cc_op;
}

void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    do_helper_tr(env, len, array, trans, GETPC());
}

uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uintptr_t ra = GETPC();
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;
    uint32_t cc = 0;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    if (l > 0x2000) {
        l = 0x2000;
        cc = 3;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data_ra(env, array + i, ra);

        if (byte == end) {
            cc = 1;
            break;
        }

        new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    env->cc_op = cc;
    env->retxl = len - i;
    return array + i;
}

static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
                                     uint64_t array, uint64_t trans,
                                     int inc, uintptr_t ra)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
        uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);

        if (sbyte != 0) {
            set_address(env, 1, array + i * inc);
            env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
            return (i == len) ? 2 : 1;
        }
    }

    return 0;
}

uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, 1, GETPC());
}

uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
                      uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, -1, GETPC());
}
/* Translate one/two to one/two */
uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
                      uint32_t tst, uint32_t sizes)
{
    uintptr_t ra = GETPC();
    int dsize = (sizes & 1) ? 1 : 2;
    int ssize = (sizes & 2) ? 1 : 2;
    uint64_t tbl = get_address(env, 1);
    uint64_t dst = get_address(env, r1);
    uint64_t len = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint32_t cc = 3;
    int i;

    /* The lower address bits of TBL are ignored.  For TROO, TROT, it's
       the low 3 bits (double-word aligned).  For TRTO, TRTT, it's either
       the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */
    if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
        tbl &= -4096;
    } else {
        tbl &= -8;
    }

    check_alignment(env, len, ssize, ra);

    /* Lest we fail to service interrupts in a timely manner, */
    /* limit the amount of work we're willing to do.  */
    for (i = 0; i < 0x2000; i++) {
        uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
        uint64_t tble = tbl + (sval * dsize);
        uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);

        if (dval == tst) {
            cc = 1;
            break;
        }
        cpu_stsize_data_ra(env, dst, dval, dsize, ra);

        len -= ssize;
        src += ssize;
        dst += dsize;

        if (len == 0) {
            cc = 0;
            break;
        }
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, len);
    set_address(env, r2, src);

    return cc;
}
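/* Illustrative note (not in the original source): bit 0 of "sizes"
   selects a one-byte destination element and bit 1 a one-byte source
   element, so sizes == 3 encodes a one-to-one translation and
   sizes == 0 a two-to-two translation. */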
static void do_cdsg(CPUS390XState *env, uint64_t addr,
                    uint32_t r1, uint32_t r3, bool parallel)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    Int128 oldv;
    bool fail;

    if (parallel) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
        fail = !int128_eq(oldv, cmpv);
#endif
    } else {
        uint64_t oldh, oldl;

        check_alignment(env, addr, 16, ra);

        oldh = cpu_ldq_data_ra(env, addr + 0, ra);
        oldl = cpu_ldq_data_ra(env, addr + 8, ra);

        oldv = int128_make128(oldl, oldh);
        fail = !int128_eq(oldv, cmpv);
        if (fail) {
            newv = oldv;
        }

        cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
        cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);
    }

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}

void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
                  uint32_t r1, uint32_t r3)
{
    do_cdsg(env, addr, r1, r3, false);
}

void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
                           uint32_t r1, uint32_t r3)
{
    do_cdsg(env, addr, r1, r3, true);
}
static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
                        uint64_t a2, bool parallel)
{
#if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128)
    uint32_t mem_idx = cpu_mmu_index(env, false);
#endif
    uintptr_t ra = GETPC();
    uint32_t fc = extract32(env->regs[0], 0, 8);
    uint32_t sc = extract32(env->regs[0], 8, 8);
    uint64_t pl = get_address(env, 1) & -16;
    uint64_t svh, svl;
    uint32_t cc;

    /* Sanity check the function code and storage characteristic.  */
    if (fc > 1 || sc > 3) {
        if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
            goto spec_exception;
        }
        if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
            goto spec_exception;
        }
    }

    /* Sanity check the alignments.  */
    if (extract32(a1, 0, 4 << fc) || extract32(a2, 0, 1 << sc)) {
        goto spec_exception;
    }

    /* Sanity check writability of the store address.  */
#ifndef CONFIG_USER_ONLY
    probe_write(env, a2, 0, mem_idx, ra);
#endif

    /* Note that the compare-and-swap is atomic, and the store is atomic, but
       the complete operation is not.  Therefore we do not need to assert serial
       context in order to implement this.  That said, restart early if we can't
       support either operation that is supposed to be atomic.  */
    if (parallel) {
        int mask = 0;
#if !defined(CONFIG_ATOMIC64)
        mask = -8;
#elif !defined(CONFIG_ATOMIC128)
        mask = -16;
#endif
        if (((4 << fc) | (1 << sc)) & mask) {
            cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
        }
    }

    /* All loads happen before all stores.  For simplicity, load the entire
       store value area from the parameter list.  */
    svh = cpu_ldq_data_ra(env, pl + 16, ra);
    svl = cpu_ldq_data_ra(env, pl + 24, ra);

    switch (fc) {
    case 0:
        {
            uint32_t nv = cpu_ldl_data_ra(env, pl, ra);
            uint32_t cv = env->regs[r3];
            uint32_t ov;

            if (parallel) {
#ifdef CONFIG_USER_ONLY
                uint32_t *haddr = g2h(a1);
                ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
#else
                TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#endif
            } else {
                ov = cpu_ldl_data_ra(env, a1, ra);
                cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
        }
        break;

    case 1:
        {
            uint64_t nv = cpu_ldq_data_ra(env, pl, ra);
            uint64_t cv = env->regs[r3];
            uint64_t ov;

            if (parallel) {
#ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
                uint64_t *haddr = g2h(a1);
                ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
# else
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
# endif
#else
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
#endif
            } else {
                ov = cpu_ldq_data_ra(env, a1, ra);
                cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = ov;
        }
        break;

    case 2:
        {
            uint64_t nvh = cpu_ldq_data_ra(env, pl, ra);
            uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra);
            Int128 nv = int128_make128(nvl, nvh);
            Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
            Int128 ov;

            if (parallel) {
#ifdef CONFIG_ATOMIC128
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
                cc = !int128_eq(ov, cv);
#else
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
#endif
            } else {
                uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
                uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);

                ov = int128_make128(ol, oh);
                cc = !int128_eq(ov, cv);
                if (cc) {
                    nv = ov;
                }

                cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
                cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
            }

            env->regs[r3 + 0] = int128_gethi(ov);
            env->regs[r3 + 1] = int128_getlo(ov);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Store only if the comparison succeeded.  Note that above we use a pair
       of 64-bit big-endian loads, so for sc < 3 we must extract the value
       from the most-significant bits of svh.  */
    if (cc == 0) {
        switch (sc) {
        case 0:
            cpu_stb_data_ra(env, a2, svh >> 56, ra);
            break;
        case 1:
            cpu_stw_data_ra(env, a2, svh >> 48, ra);
            break;
        case 2:
            cpu_stl_data_ra(env, a2, svh >> 32, ra);
            break;
        case 3:
            cpu_stq_data_ra(env, a2, svh, ra);
            break;
        case 4:
            if (parallel) {
#ifdef CONFIG_ATOMIC128
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                Int128 sv = int128_make128(svl, svh);
                helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
#else
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
#endif
            } else {
                cpu_stq_data_ra(env, a2 + 0, svh, ra);
                cpu_stq_data_ra(env, a2 + 8, svl, ra);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    return cc;

 spec_exception:
    s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    g_assert_not_reached();
}

uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
{
    return do_csst(env, r3, a1, a2, false);
}

uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
                               uint64_t a2)
{
    return do_csst(env, r3, a1, a2, true);
}
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint64_t val = cpu_ldq_data_ra(env, src, ra);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, val);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu));
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint32_t val = cpu_ldl_data_ra(env, src, ra);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    tlb_flush(CPU(cpu));
}

void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}
)(CPUS390XState
*env
, uint64_t real_addr
)
1733 uintptr_t ra
= GETPC();
1736 real_addr
= wrap_address(env
, real_addr
) & TARGET_PAGE_MASK
;
1738 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
+= 8) {
1739 cpu_stq_real_ra(env
, real_addr
+ i
, 0, ra
);
1745 uint32_t HELPER(tprot
)(CPUS390XState
*env
, uint64_t a1
, uint64_t a2
)
1747 S390CPU
*cpu
= s390_env_get_cpu(env
);
1748 CPUState
*cs
= CPU(cpu
);
1751 * TODO: we currently don't handle all access protection types
1752 * (including access-list and key-controlled) as well as AR mode.
1754 if (!s390_cpu_virt_mem_check_write(cpu
, a1
, 0, 1)) {
1755 /* Fetching permitted; storing permitted */
1759 if (env
->int_pgm_code
== PGM_PROTECTION
) {
1760 /* retry if reading is possible */
1761 cs
->exception_index
= 0;
1762 if (!s390_cpu_virt_mem_check_read(cpu
, a1
, 0, 1)) {
1763 /* Fetching permitted; storing not permitted */
1768 switch (env
->int_pgm_code
) {
1769 case PGM_PROTECTION
:
1770 /* Fetching not permitted; storing not permitted */
1771 cs
->exception_index
= 0;
1773 case PGM_ADDRESSING
:
1774 case PGM_TRANS_SPEC
:
1775 /* exceptions forwarded to the guest */
1776 s390_cpu_virt_mem_handle_exc(cpu
, GETPC());
1780 /* Translation not available */
1781 cs
->exception_index
= 0;
/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1870 uintptr_t ra
= GETPC();
1873 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1874 __func__
, l
, a1
, a2
);
1882 /* XXX replace w/ memcpy */
1883 for (i
= 0; i
< l
; i
++) {
1884 uint8_t x
= cpu_ldub_primary_ra(env
, a2
+ i
, ra
);
1885 cpu_stb_secondary_ra(env
, a1
+ i
, x
, ra
);
1891 uint32_t HELPER(mvcp
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1893 uintptr_t ra
= GETPC();
1896 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1897 __func__
, l
, a1
, a2
);
1905 /* XXX replace w/ memcpy */
1906 for (i
= 0; i
< l
; i
++) {
1907 uint8_t x
= cpu_ldub_secondary_ra(env
, a2
+ i
, ra
);
1908 cpu_stb_primary_ra(env
, a1
+ i
, x
, ra
);
void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    const uintptr_t ra = GETPC();
    uint64_t table, entry, raddr;
    uint16_t entries, i, index = 0;

    if (r2 & 0xff000) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    if (!(r2 & 0x800)) {
        /* invalidation-and-clearing operation */
        table = r1 & ASCE_ORIGIN;
        entries = (r2 & 0x7ff) + 1;

        switch (r1 & ASCE_TYPE_MASK) {
        case ASCE_TYPE_REGION1:
            index = (r2 >> 53) & 0x7ff;
            break;
        case ASCE_TYPE_REGION2:
            index = (r2 >> 42) & 0x7ff;
            break;
        case ASCE_TYPE_REGION3:
            index = (r2 >> 31) & 0x7ff;
            break;
        case ASCE_TYPE_SEGMENT:
            index = (r2 >> 20) & 0x7ff;
            break;
        }
        for (i = 0; i < entries; i++) {
            /* addresses are not wrapped in 24/31bit mode but table index is */
            raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
            entry = cpu_ldq_real_ra(env, raddr, ra);
            if (!(entry & REGION_ENTRY_INV)) {
                /* we are allowed to not store if already invalid */
                entry |= REGION_ENTRY_INV;
                cpu_stq_real_ra(env, raddr, entry, ra);
            }
        }
    }

    /* We simply flush the complete tlb, therefore we can ignore r3. */
    if (m4 & 1) {
        tlb_flush(cs);
    } else {
        tlb_flush_all_cpus_synced(cs);
    }
}

/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
                  uint32_t m4)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    const uintptr_t ra = GETPC();
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte_addr, pte;

    /* Compute the page table entry address */
    pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
    pte_addr += (vaddr & VADDR_PX) >> 9;

    /* Mark the page table entry as invalid */
    pte = cpu_ldq_real_ra(env, pte_addr, ra);
    pte |= PAGE_INVALID;
    cpu_stq_real_ra(env, pte_addr, pte, ra);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    if (m4 & 1) {
        if (vaddr & ~VADDR_PX) {
            tlb_flush_page(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush(cs);
        }
    } else {
        if (vaddr & ~VADDR_PX) {
            tlb_flush_page_all_cpus_synced(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush_all_cpus_synced(cs);
        }
    }
}
/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

/* flush global tlb */
void HELPER(purge)(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    tlb_flush_all_cpus_synced(CPU(cpu));
}

/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    return cpu_ldl_real_ra(env, wrap_address(env, addr), GETPC());
}

uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    return cpu_ldq_real_ra(env, wrap_address(env, addr), GETPC());
}

/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    cpu_stl_real_ra(env, wrap_address(env, addr), (uint32_t)v1, GETPC());

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    cpu_stq_real_ra(env, wrap_address(env, addr), v1, GETPC());

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int old_exc, flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 2, GETPC());
    }

    old_exc = cs->exception_index;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif
/* load pair from quadword */
static uint64_t do_lpq(CPUS390XState *env, uint64_t addr, bool parallel)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;

    if (parallel) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
        hi = int128_gethi(v);
        lo = int128_getlo(v);
#endif
    } else {
        check_alignment(env, addr, 16, ra);

        hi = cpu_ldq_data_ra(env, addr + 0, ra);
        lo = cpu_ldq_data_ra(env, addr + 8, ra);
    }

    env->retxl = lo;
    return hi;
}

uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
{
    return do_lpq(env, addr, false);
}

uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
{
    return do_lpq(env, addr, true);
}

/* store pair to quadword */
static void do_stpq(CPUS390XState *env, uint64_t addr,
                    uint64_t low, uint64_t high, bool parallel)
{
    uintptr_t ra = GETPC();

    if (parallel) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);

        Int128 v = int128_make128(low, high);
        helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
#endif
    } else {
        check_alignment(env, addr, 16, ra);

        cpu_stq_data_ra(env, addr + 0, high, ra);
        cpu_stq_data_ra(env, addr + 8, low, ra);
    }
}

void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
                  uint64_t low, uint64_t high)
{
    do_stpq(env, addr, low, high, false);
}

void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
                           uint64_t low, uint64_t high)
{
    do_stpq(env, addr, low, high, true);
}
/* Execute instruction.  This instruction executes an insn modified with
   the contents of r1.  It does not change the executed instruction in memory;
   it does not change the program counter.

   Perform this by recording the modified instruction in env->ex_value.
   This will be noticed by cpu_get_tb_cpu_state and thus tb translation.  */
void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
{
    uint64_t insn = cpu_lduw_code(env, addr);
    uint8_t opc = insn >> 8;

    /* Or in the contents of R1[56:63].  */
    insn |= r1 & 0xff;

    /* Load the rest of the instruction.  */
    insn <<= 48;
    switch (get_ilen(opc)) {
    case 2:
        break;
    case 4:
        insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
        break;
    case 6:
        insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
        break;
    default:
        g_assert_not_reached();
    }

    /* The very most common cases can be sped up by avoiding a new TB.  */
    if ((opc & 0xf0) == 0xd0) {
        typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
                                      uint64_t, uintptr_t);
        static const dx_helper dx[16] = {
            [0x2] = do_helper_mvc,
            [0x4] = do_helper_nc,
            [0x5] = do_helper_clc,
            [0x6] = do_helper_oc,
            [0x7] = do_helper_xc,
            [0xc] = do_helper_tr,
        };
        dx_helper helper = dx[opc & 0xf];

        if (helper) {
            uint32_t l = extract64(insn, 48, 8);
            uint32_t b1 = extract64(insn, 44, 4);
            uint32_t d1 = extract64(insn, 32, 12);
            uint32_t b2 = extract64(insn, 28, 4);
            uint32_t d2 = extract64(insn, 16, 12);
            uint64_t a1 = wrap_address(env, env->regs[b1] + d1);
            uint64_t a2 = wrap_address(env, env->regs[b2] + d2);

            env->cc_op = helper(env, l, a1, a2, 0);
            env->psw.addr += ilen;
            return;
        }
    } else if (opc == 0x0a) {
        env->int_svc_code = extract64(insn, 48, 8);
        env->int_svc_ilen = ilen;
        helper_exception(env, EXCP_SVC);
        g_assert_not_reached();
    }

    /* Record the insn we want to execute as well as the ilen to use
       during the execution of the target insn.  This will also ensure
       that ex_value is non-zero, which flags that we are in a state
       that requires such execution.  */
    env->ex_value = insn | ilen;
}
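/* Illustrative note (not in the original source): the modified instruction
   text occupies bits 16-63 of ex_value (first halfword at bits 48-63) while
   the low bits hold ilen (2, 4 or 6), so a nonzero ex_value doubles as the
   "executing" flag mentioned above. */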
uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
                       uint64_t len)
{
    const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    const uint64_t r0 = env->regs[0];
    const uintptr_t ra = GETPC();
    uint8_t dest_key, dest_as, dest_k, dest_a;
    uint8_t src_key, src_as, src_k, src_a;
    uint64_t val;
    int cc = 0;

    HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
               __func__, dest, src, len);

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }

    /* OAC (operand access control) for the first operand -> dest */
    val = (r0 & 0xffff0000ULL) >> 16;
    dest_key = (val >> 12) & 0xf;
    dest_as = (val >> 6) & 0x3;
    dest_k = (val >> 1) & 0x1;
    dest_a = val & 0x1;

    /* OAC (operand access control) for the second operand -> src */
    val = (r0 & 0x0000ffffULL);
    src_key = (val >> 12) & 0xf;
    src_as = (val >> 6) & 0x3;
    src_k = (val >> 1) & 0x1;
    src_a = val & 0x1;

    if (!dest_k) {
        dest_key = psw_key;
    }
    if (!src_k) {
        src_key = psw_key;
    }
    if (!dest_a) {
        dest_as = psw_as;
    }
    if (!src_a) {
        src_as = psw_as;
    }

    if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
    if (!(env->cregs[0] & CR0_SECONDARY) &&
        (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
    if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
    }

    len = wrap_length(env, len);
    if (len > 4096) {
        cc = 3;
        len = 4096;
    }

    /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
    if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
        (env->psw.mask & PSW_MASK_PSTATE)) {
        qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
                      __func__);
        s390_program_interrupt(env, PGM_ADDRESSING, 6, ra);
    }

    /* FIXME: a) LAP
     *        b) Access using correct keys
     *        c) AR-mode
     */
#ifdef CONFIG_USER_ONLY
    /* psw keys are never valid in user mode, we will never reach this */
    g_assert_not_reached();
#else
    fast_memmove_as(env, dest, src, len, dest_as, src_as, ra);
#endif

    return cc;
}
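/* Worked example (illustrative, not from the original source): an OAC
   halfword of 0x5041 decodes as key = 5, as = 1 (secondary space),
   k = 0 and a = 1, so the PSW key is kept while the address-space
   override to the secondary space takes effect. */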
/* Decode a Unicode character.  A return value < 0 indicates success, storing
   the UTF-32 result into OCHAR and the input length into OLEN.  A return
   value >= 0 indicates failure, and the CC value to be returned.  */
typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, bool enh_check, uintptr_t ra,
                                 uint32_t *ochar, uint32_t *olen);

/* Encode a Unicode character.  A return value < 0 indicates success, storing
   the bytes into ADDR and the output length into OLEN.  A return value >= 0
   indicates failure, and the CC value to be returned.  */
typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, uintptr_t ra, uint32_t c,
                                 uint32_t *olen);

static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       bool enh_check, uintptr_t ra,
                       uint32_t *ochar, uint32_t *olen)
{
    uint8_t s0, s1, s2, s3;
    uint32_t c, l;

    if (ilen < 1) {
        return 0;
    }
    s0 = cpu_ldub_data_ra(env, addr, ra);
    if (s0 <= 0x7f) {
        /* one byte character */
        l = 1;
        c = s0;
    } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
        /* invalid character */
        return 2;
    } else if (s0 <= 0xdf) {
        /* two byte character */
        l = 2;
        c = s0 & 0x1f;
        if (ilen < 2) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        c = (c << 6) | (s1 & 0x3f);
        if (enh_check && (s1 & 0xc0) != 0x80) {
            return 2;
        }
    } else if (s0 <= 0xef) {
        /* three byte character */
        l = 3;
        c = s0 & 0x0f;
        if (ilen < 3) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        /* Fold the byte-by-byte range descriptions in the PoO into
           tests against the complete value.  It disallows encodings
           that could be smaller, and the UTF-16 surrogates.  */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || c < 0x800
                || (c >= 0xd800 && c <= 0xdfff))) {
            return 2;
        }
    } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
        /* four byte character */
        l = 4;
        c = s0 & 0x07;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        s3 = cpu_ldub_data_ra(env, addr + 3, ra);
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        c = (c << 6) | (s3 & 0x3f);
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || (s3 & 0xc0) != 0x80
                || c < 0x10000
                || c > 0x10ffff)) {
            return 2;
        }
    } else {
        /* invalid character */
        return 2;
    }

    *ochar = c;
    *olen = l;
    return -1;
}
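/* Worked example (illustrative, not from the original source): the two-byte
   sequence 0xc3 0xa9 decodes as c = ((0xc3 & 0x1f) << 6) | (0xa9 & 0x3f)
   = 0xe9, i.e. U+00E9; with enh_check set, a continuation byte outside
   0x80-0xbf would return cc 2 instead. */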
static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint16_t s0, s1;
    uint32_t c, l;

    if (ilen < 2) {
        return 0;
    }
    s0 = cpu_lduw_data_ra(env, addr, ra);
    if ((s0 & 0xfc00) != 0xd800) {
        /* one word character */
        l = 2;
        c = s0;
    } else {
        /* two word character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_lduw_data_ra(env, addr + 2, ra);
        c = extract32(s0, 6, 4) + 1;
        c = (c << 6) | (s0 & 0x3f);
        c = (c << 10) | (s1 & 0x3ff);
        if (enh_check && (s1 & 0xfc00) != 0xdc00) {
            /* invalid surrogate character */
            return 2;
        }
    }

    *ochar = c;
    *olen = l;
    return -1;
}

static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint32_t c;

    if (ilen < 4) {
        return 0;
    }
    c = cpu_ldl_data_ra(env, addr, ra);
    if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
        /* invalid unicode character */
        return 2;
    }

    *ochar = c;
    *olen = 4;
    return -1;
}

static int encode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint8_t d[4];
    uint32_t l, i;

    if (c <= 0x7f) {
        /* one byte character */
        l = 1;
        d[0] = c;
    } else if (c <= 0x7ff) {
        /* two byte character */
        l = 2;
        d[1] = 0x80 | extract32(c, 0, 6);
        d[0] = 0xc0 | extract32(c, 6, 5);
    } else if (c <= 0xffff) {
        /* three byte character */
        l = 3;
        d[2] = 0x80 | extract32(c, 0, 6);
        d[1] = 0x80 | extract32(c, 6, 6);
        d[0] = 0xe0 | extract32(c, 12, 4);
    } else {
        /* four byte character */
        l = 4;
        d[3] = 0x80 | extract32(c, 0, 6);
        d[2] = 0x80 | extract32(c, 6, 6);
        d[1] = 0x80 | extract32(c, 12, 6);
        d[0] = 0xf0 | extract32(c, 18, 3);
    }

    if (ilen < l) {
        return 1;
    }
    for (i = 0; i < l; ++i) {
        cpu_stb_data_ra(env, addr + i, d[i], ra);
    }

    *olen = l;
    return -1;
}
static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint16_t d0, d1;

    if (c <= 0xffff) {
        /* one word character */
        if (ilen < 2) {
            return 1;
        }
        cpu_stw_data_ra(env, addr, c, ra);
        *olen = 2;
    } else {
        /* two word character */
        if (ilen < 4) {
            return 1;
        }
        d1 = 0xdc00 | extract32(c, 0, 10);
        d0 = 0xd800 | extract32(c, 10, 6);
        d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
        cpu_stw_data_ra(env, addr + 0, d0, ra);
        cpu_stw_data_ra(env, addr + 2, d1, ra);
        *olen = 4;
    }

    return -1;
}
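/* Worked example (illustrative, not from the original source): c = 0x10437
   encodes as the surrogate pair d0 = 0xd801, d1 = 0xdc37: extract32(c, 0, 10)
   = 0x037, extract32(c, 10, 6) = 0x01, and extract32(c, 16, 5) - 1 = 0
   deposited at bit 6 leaves d0 unchanged. */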
static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    if (ilen < 4) {
        return 1;
    }
    cpu_stl_data_ra(env, addr, c, ra);
    *olen = 4;
    return -1;
}

static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
                                       uint32_t r2, uint32_t m3, uintptr_t ra,
                                       decode_unicode_fn decode,
                                       encode_unicode_fn encode)
{
    uint64_t dst = get_address(env, r1);
    uint64_t dlen = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint64_t slen = get_length(env, r2 + 1);
    bool enh_check = m3 & 1;
    int i, cc = 3;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 256.  */
    for (i = 0; i < 256; ++i) {
        uint32_t c, ilen, olen;

        cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
        if (unlikely(cc >= 0)) {
            break;
        }
        cc = encode(env, dst, dlen, ra, c, &olen);
        if (unlikely(cc >= 0)) {
            break;
        }

        src += ilen;
        slen -= ilen;
        dst += olen;
        dlen -= olen;
        cc = 3;
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, dlen);
    set_address(env, r2, src);
    set_length(env, r2 + 1, slen);

    return cc;
}

uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf16);
}

uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf32);
}

uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf8);
}

uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf32);
}

uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf8);
}

uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf16);
}