/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "qemu/int128.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/s390x/storage-keys.h"
33 /*****************************************************************************/
35 #if !defined(CONFIG_USER_ONLY)
37 /* try to fill the TLB and return an exception if error. If retaddr is
38 NULL, it means that the function was called in C code (i.e. not
39 from generated code or from helper.c) */
40 /* XXX: fix it to restore all registers */
41 void tlb_fill(CPUState
*cs
, target_ulong addr
, int size
,
42 MMUAccessType access_type
, int mmu_idx
, uintptr_t retaddr
)
44 int ret
= s390_cpu_handle_mmu_fault(cs
, addr
, size
, access_type
, mmu_idx
);
45 if (unlikely(ret
!= 0)) {
46 cpu_loop_exit_restore(cs
, retaddr
);
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
59 static inline bool psw_key_valid(CPUS390XState
*env
, uint8_t psw_key
)
61 uint16_t pkm
= env
->cregs
[3] >> 16;
63 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
64 /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
65 return pkm
& (0x80 >> psw_key
);
70 /* Reduce the length so that addr + len doesn't cross a page boundary. */
71 static inline uint32_t adj_len_to_page(uint32_t len
, uint64_t addr
)
73 #ifndef CONFIG_USER_ONLY
74 if ((addr
& ~TARGET_PAGE_MASK
) + len
- 1 >= TARGET_PAGE_SIZE
) {
75 return -(addr
| TARGET_PAGE_MASK
);
81 /* Trigger a SPECIFICATION exception if an address or a length is not
83 static inline void check_alignment(CPUS390XState
*env
, uint64_t v
,
84 int wordsize
, uintptr_t ra
)
87 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
91 /* Load a value from memory according to its size. */
92 static inline uint64_t cpu_ldusize_data_ra(CPUS390XState
*env
, uint64_t addr
,
93 int wordsize
, uintptr_t ra
)
97 return cpu_ldub_data_ra(env
, addr
, ra
);
99 return cpu_lduw_data_ra(env
, addr
, ra
);
105 /* Store a to memory according to its size. */
106 static inline void cpu_stsize_data_ra(CPUS390XState
*env
, uint64_t addr
,
107 uint64_t value
, int wordsize
,
112 cpu_stb_data_ra(env
, addr
, value
, ra
);
115 cpu_stw_data_ra(env
, addr
, value
, ra
);
122 static void fast_memset(CPUS390XState
*env
, uint64_t dest
, uint8_t byte
,
123 uint32_t l
, uintptr_t ra
)
125 int mmu_idx
= cpu_mmu_index(env
, false);
128 void *p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, mmu_idx
);
130 /* Access to the whole page in write mode granted. */
131 uint32_t l_adj
= adj_len_to_page(l
, dest
);
132 memset(p
, byte
, l_adj
);
136 /* We failed to get access to the whole page. The next write
137 access will likely fill the QEMU TLB for the next iteration. */
138 cpu_stb_data_ra(env
, dest
, byte
, ra
);
145 #ifndef CONFIG_USER_ONLY
146 static void fast_memmove_idx(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
147 uint32_t len
, int dest_idx
, int src_idx
,
150 TCGMemOpIdx oi_dest
= make_memop_idx(MO_UB
, dest_idx
);
151 TCGMemOpIdx oi_src
= make_memop_idx(MO_UB
, src_idx
);
158 src
= wrap_address(env
, src
);
159 dest
= wrap_address(env
, dest
);
160 src_p
= tlb_vaddr_to_host(env
, src
, MMU_DATA_LOAD
, src_idx
);
161 dest_p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, dest_idx
);
163 if (src_p
&& dest_p
) {
164 /* Access to both whole pages granted. */
165 len_adj
= adj_len_to_page(adj_len_to_page(len
, src
), dest
);
166 memmove(dest_p
, src_p
, len_adj
);
168 /* We failed to get access to one or both whole pages. The next
169 read or write access will likely fill the QEMU TLB for the
172 x
= helper_ret_ldub_mmu(env
, src
, oi_src
, ra
);
173 helper_ret_stb_mmu(env
, dest
, x
, oi_dest
, ra
);
181 static int mmu_idx_from_as(uint8_t as
)
185 return MMU_PRIMARY_IDX
;
187 return MMU_SECONDARY_IDX
;
191 /* FIXME AS_ACCREG */
192 g_assert_not_reached();
196 static void fast_memmove_as(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
197 uint32_t len
, uint8_t dest_as
, uint8_t src_as
,
200 int src_idx
= mmu_idx_from_as(src_as
);
201 int dest_idx
= mmu_idx_from_as(dest_as
);
203 fast_memmove_idx(env
, dest
, src
, len
, dest_idx
, src_idx
, ra
);
207 static void fast_memmove(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
208 uint32_t l
, uintptr_t ra
)
210 int mmu_idx
= cpu_mmu_index(env
, false);
213 void *src_p
= tlb_vaddr_to_host(env
, src
, MMU_DATA_LOAD
, mmu_idx
);
214 void *dest_p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, mmu_idx
);
215 if (src_p
&& dest_p
) {
216 /* Access to both whole pages granted. */
217 uint32_t l_adj
= adj_len_to_page(l
, src
);
218 l_adj
= adj_len_to_page(l_adj
, dest
);
219 memmove(dest_p
, src_p
, l_adj
);
224 /* We failed to get access to one or both whole pages. The next
225 read or write access will likely fill the QEMU TLB for the
227 cpu_stb_data_ra(env
, dest
, cpu_ldub_data_ra(env
, src
, ra
), ra
);
236 static uint32_t do_helper_nc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
237 uint64_t src
, uintptr_t ra
)
242 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
243 __func__
, l
, dest
, src
);
245 for (i
= 0; i
<= l
; i
++) {
246 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
247 x
&= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
249 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
254 uint32_t HELPER(nc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
257 return do_helper_nc(env
, l
, dest
, src
, GETPC());
261 static uint32_t do_helper_xc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
262 uint64_t src
, uintptr_t ra
)
267 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
268 __func__
, l
, dest
, src
);
270 /* xor with itself is the same as memset(0) */
272 fast_memset(env
, dest
, 0, l
+ 1, ra
);
276 for (i
= 0; i
<= l
; i
++) {
277 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
278 x
^= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
280 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
285 uint32_t HELPER(xc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
288 return do_helper_xc(env
, l
, dest
, src
, GETPC());
292 static uint32_t do_helper_oc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
293 uint64_t src
, uintptr_t ra
)
298 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
299 __func__
, l
, dest
, src
);
301 for (i
= 0; i
<= l
; i
++) {
302 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
303 x
|= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
305 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
310 uint32_t HELPER(oc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
313 return do_helper_oc(env
, l
, dest
, src
, GETPC());
317 static uint32_t do_helper_mvc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
318 uint64_t src
, uintptr_t ra
)
322 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
323 __func__
, l
, dest
, src
);
325 /* mvc and memmove do not behave the same when areas overlap! */
326 /* mvc with source pointing to the byte after the destination is the
327 same as memset with the first source byte */
328 if (dest
== src
+ 1) {
329 fast_memset(env
, dest
, cpu_ldub_data_ra(env
, src
, ra
), l
+ 1, ra
);
330 } else if (dest
< src
|| src
+ l
< dest
) {
331 fast_memmove(env
, dest
, src
, l
+ 1, ra
);
333 /* slow version with byte accesses which always work */
334 for (i
= 0; i
<= l
; i
++) {
335 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
336 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
343 void HELPER(mvc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
345 do_helper_mvc(env
, l
, dest
, src
, GETPC());
349 void HELPER(mvcin
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
351 uintptr_t ra
= GETPC();
354 for (i
= 0; i
<= l
; i
++) {
355 uint8_t v
= cpu_ldub_data_ra(env
, src
- i
, ra
);
356 cpu_stb_data_ra(env
, dest
+ i
, v
, ra
);
361 void HELPER(mvn
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
363 uintptr_t ra
= GETPC();
366 for (i
= 0; i
<= l
; i
++) {
367 uint8_t v
= cpu_ldub_data_ra(env
, dest
+ i
, ra
) & 0xf0;
368 v
|= cpu_ldub_data_ra(env
, src
+ i
, ra
) & 0x0f;
369 cpu_stb_data_ra(env
, dest
+ i
, v
, ra
);
373 /* move with offset */
374 void HELPER(mvo
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
376 uintptr_t ra
= GETPC();
377 int len_dest
= l
>> 4;
378 int len_src
= l
& 0xf;
379 uint8_t byte_dest
, byte_src
;
385 /* Handle rightmost byte */
386 byte_src
= cpu_ldub_data_ra(env
, src
, ra
);
387 byte_dest
= cpu_ldub_data_ra(env
, dest
, ra
);
388 byte_dest
= (byte_dest
& 0x0f) | (byte_src
<< 4);
389 cpu_stb_data_ra(env
, dest
, byte_dest
, ra
);
391 /* Process remaining bytes from right to left */
392 for (i
= 1; i
<= len_dest
; i
++) {
393 byte_dest
= byte_src
>> 4;
394 if (len_src
- i
>= 0) {
395 byte_src
= cpu_ldub_data_ra(env
, src
- i
, ra
);
399 byte_dest
|= byte_src
<< 4;
400 cpu_stb_data_ra(env
, dest
- i
, byte_dest
, ra
);
405 void HELPER(mvz
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
407 uintptr_t ra
= GETPC();
410 for (i
= 0; i
<= l
; i
++) {
411 uint8_t b
= cpu_ldub_data_ra(env
, dest
+ i
, ra
) & 0x0f;
412 b
|= cpu_ldub_data_ra(env
, src
+ i
, ra
) & 0xf0;
413 cpu_stb_data_ra(env
, dest
+ i
, b
, ra
);
417 /* compare unsigned byte arrays */
418 static uint32_t do_helper_clc(CPUS390XState
*env
, uint32_t l
, uint64_t s1
,
419 uint64_t s2
, uintptr_t ra
)
424 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
425 __func__
, l
, s1
, s2
);
427 for (i
= 0; i
<= l
; i
++) {
428 uint8_t x
= cpu_ldub_data_ra(env
, s1
+ i
, ra
);
429 uint8_t y
= cpu_ldub_data_ra(env
, s2
+ i
, ra
);
430 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
444 uint32_t HELPER(clc
)(CPUS390XState
*env
, uint32_t l
, uint64_t s1
, uint64_t s2
)
446 return do_helper_clc(env
, l
, s1
, s2
, GETPC());
449 /* compare logical under mask */
450 uint32_t HELPER(clm
)(CPUS390XState
*env
, uint32_t r1
, uint32_t mask
,
453 uintptr_t ra
= GETPC();
456 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __func__
, r1
,
461 uint8_t d
= cpu_ldub_data_ra(env
, addr
, ra
);
462 uint8_t r
= extract32(r1
, 24, 8);
463 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
474 mask
= (mask
<< 1) & 0xf;
482 static inline uint64_t get_address(CPUS390XState
*env
, int reg
)
484 return wrap_address(env
, env
->regs
[reg
]);
487 static inline void set_address(CPUS390XState
*env
, int reg
, uint64_t address
)
489 if (env
->psw
.mask
& PSW_MASK_64
) {
491 env
->regs
[reg
] = address
;
493 if (!(env
->psw
.mask
& PSW_MASK_32
)) {
494 /* 24-Bit mode. According to the PoO it is implementation
495 dependent if bits 32-39 remain unchanged or are set to
496 zeros. Choose the former so that the function can also be
498 env
->regs
[reg
] = deposit64(env
->regs
[reg
], 0, 24, address
);
500 /* 31-Bit mode. According to the PoO it is implementation
501 dependent if bit 32 remains unchanged or is set to zero.
502 Choose the latter so that the function can also be used for
504 address
&= 0x7fffffff;
505 env
->regs
[reg
] = deposit64(env
->regs
[reg
], 0, 32, address
);
510 static inline uint64_t wrap_length(CPUS390XState
*env
, uint64_t length
)
512 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
513 /* 24-Bit and 31-Bit mode */
514 length
&= 0x7fffffff;
519 static inline uint64_t get_length(CPUS390XState
*env
, int reg
)
521 return wrap_length(env
, env
->regs
[reg
]);
524 static inline void set_length(CPUS390XState
*env
, int reg
, uint64_t length
)
526 if (env
->psw
.mask
& PSW_MASK_64
) {
528 env
->regs
[reg
] = length
;
530 /* 24-Bit and 31-Bit mode */
531 env
->regs
[reg
] = deposit64(env
->regs
[reg
], 0, 32, length
);
535 /* search string (c is byte to search, r2 is string, r1 end of string) */
536 void HELPER(srst
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
538 uintptr_t ra
= GETPC();
541 uint8_t v
, c
= env
->regs
[0];
543 /* Bits 32-55 must contain all 0. */
544 if (env
->regs
[0] & 0xffffff00u
) {
545 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
548 str
= get_address(env
, r2
);
549 end
= get_address(env
, r1
);
551 /* Lest we fail to service interrupts in a timely manner, limit the
552 amount of work we're willing to do. For now, let's cap at 8k. */
553 for (len
= 0; len
< 0x2000; ++len
) {
554 if (str
+ len
== end
) {
555 /* Character not found. R1 & R2 are unmodified. */
559 v
= cpu_ldub_data_ra(env
, str
+ len
, ra
);
561 /* Character found. Set R1 to the location; R2 is unmodified. */
563 set_address(env
, r1
, str
+ len
);
568 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
570 set_address(env
, r2
, str
+ len
);
573 void HELPER(srstu
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
575 uintptr_t ra
= GETPC();
577 uint16_t v
, c
= env
->regs
[0];
578 uint64_t end
, str
, adj_end
;
580 /* Bits 32-47 of R0 must be zero. */
581 if (env
->regs
[0] & 0xffff0000u
) {
582 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
585 str
= get_address(env
, r2
);
586 end
= get_address(env
, r1
);
588 /* If the LSB of the two addresses differ, use one extra byte. */
589 adj_end
= end
+ ((str
^ end
) & 1);
591 /* Lest we fail to service interrupts in a timely manner, limit the
592 amount of work we're willing to do. For now, let's cap at 8k. */
593 for (len
= 0; len
< 0x2000; len
+= 2) {
594 if (str
+ len
== adj_end
) {
595 /* End of input found. */
599 v
= cpu_lduw_data_ra(env
, str
+ len
, ra
);
601 /* Character found. Set R1 to the location; R2 is unmodified. */
603 set_address(env
, r1
, str
+ len
);
608 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
610 set_address(env
, r2
, str
+ len
);
613 /* unsigned string compare (c is string terminator) */
614 uint64_t HELPER(clst
)(CPUS390XState
*env
, uint64_t c
, uint64_t s1
, uint64_t s2
)
616 uintptr_t ra
= GETPC();
620 s1
= wrap_address(env
, s1
);
621 s2
= wrap_address(env
, s2
);
623 /* Lest we fail to service interrupts in a timely manner, limit the
624 amount of work we're willing to do. For now, let's cap at 8k. */
625 for (len
= 0; len
< 0x2000; ++len
) {
626 uint8_t v1
= cpu_ldub_data_ra(env
, s1
+ len
, ra
);
627 uint8_t v2
= cpu_ldub_data_ra(env
, s2
+ len
, ra
);
630 /* Equal. CC=0, and don't advance the registers. */
636 /* Unequal. CC={1,2}, and advance the registers. Note that
637 the terminator need not be zero, but the string that contains
638 the terminator is by definition "low". */
639 env
->cc_op
= (v1
== c
? 1 : v2
== c
? 2 : v1
< v2
? 1 : 2);
640 env
->retxl
= s2
+ len
;
645 /* CPU-determined bytes equal; advance the registers. */
647 env
->retxl
= s2
+ len
;
652 uint32_t HELPER(mvpg
)(CPUS390XState
*env
, uint64_t r0
, uint64_t r1
, uint64_t r2
)
654 /* ??? missing r0 handling, which includes access keys, but more
655 importantly optional suppression of the exception! */
656 fast_memmove(env
, r1
, r2
, TARGET_PAGE_SIZE
, GETPC());
657 return 0; /* data moved */
660 /* string copy (c is string terminator) */
661 uint64_t HELPER(mvst
)(CPUS390XState
*env
, uint64_t c
, uint64_t d
, uint64_t s
)
663 uintptr_t ra
= GETPC();
667 d
= wrap_address(env
, d
);
668 s
= wrap_address(env
, s
);
670 /* Lest we fail to service interrupts in a timely manner, limit the
671 amount of work we're willing to do. For now, let's cap at 8k. */
672 for (len
= 0; len
< 0x2000; ++len
) {
673 uint8_t v
= cpu_ldub_data_ra(env
, s
+ len
, ra
);
674 cpu_stb_data_ra(env
, d
+ len
, v
, ra
);
676 /* Complete. Set CC=1 and advance R1. */
683 /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
685 env
->retxl
= s
+ len
;
689 /* load access registers r1 to r3 from memory at a2 */
690 void HELPER(lam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
692 uintptr_t ra
= GETPC();
696 /* we either came here by lam or lamy, which have different lengths */
697 s390_program_interrupt(env
, PGM_SPECIFICATION
, ILEN_AUTO
, ra
);
700 for (i
= r1
;; i
= (i
+ 1) % 16) {
701 env
->aregs
[i
] = cpu_ldl_data_ra(env
, a2
, ra
);
710 /* store access registers r1 to r3 in memory at a2 */
711 void HELPER(stam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
713 uintptr_t ra
= GETPC();
717 s390_program_interrupt(env
, PGM_SPECIFICATION
, 4, ra
);
720 for (i
= r1
;; i
= (i
+ 1) % 16) {
721 cpu_stl_data_ra(env
, a2
, env
->aregs
[i
], ra
);
730 /* move long helper */
731 static inline uint32_t do_mvcl(CPUS390XState
*env
,
732 uint64_t *dest
, uint64_t *destlen
,
733 uint64_t *src
, uint64_t *srclen
,
734 uint16_t pad
, int wordsize
, uintptr_t ra
)
736 uint64_t len
= MIN(*srclen
, *destlen
);
739 if (*destlen
== *srclen
) {
741 } else if (*destlen
< *srclen
) {
747 /* Copy the src array */
748 fast_memmove(env
, *dest
, *src
, len
, ra
);
754 /* Pad the remaining area */
756 fast_memset(env
, *dest
, pad
, *destlen
, ra
);
760 /* If remaining length is odd, pad with odd byte first. */
762 cpu_stb_data_ra(env
, *dest
, pad
& 0xff, ra
);
766 /* The remaining length is even, pad using words. */
767 for (; *destlen
; *dest
+= 2, *destlen
-= 2) {
768 cpu_stw_data_ra(env
, *dest
, pad
, ra
);
776 uint32_t HELPER(mvcl
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
778 uintptr_t ra
= GETPC();
779 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
780 uint64_t dest
= get_address(env
, r1
);
781 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
782 uint64_t src
= get_address(env
, r2
);
783 uint8_t pad
= env
->regs
[r2
+ 1] >> 24;
786 cc
= do_mvcl(env
, &dest
, &destlen
, &src
, &srclen
, pad
, 1, ra
);
788 env
->regs
[r1
+ 1] = deposit64(env
->regs
[r1
+ 1], 0, 24, destlen
);
789 env
->regs
[r2
+ 1] = deposit64(env
->regs
[r2
+ 1], 0, 24, srclen
);
790 set_address(env
, r1
, dest
);
791 set_address(env
, r2
, src
);
796 /* move long extended */
797 uint32_t HELPER(mvcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
800 uintptr_t ra
= GETPC();
801 uint64_t destlen
= get_length(env
, r1
+ 1);
802 uint64_t dest
= get_address(env
, r1
);
803 uint64_t srclen
= get_length(env
, r3
+ 1);
804 uint64_t src
= get_address(env
, r3
);
808 cc
= do_mvcl(env
, &dest
, &destlen
, &src
, &srclen
, pad
, 1, ra
);
810 set_length(env
, r1
+ 1, destlen
);
811 set_length(env
, r3
+ 1, srclen
);
812 set_address(env
, r1
, dest
);
813 set_address(env
, r3
, src
);
818 /* move long unicode */
819 uint32_t HELPER(mvclu
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
822 uintptr_t ra
= GETPC();
823 uint64_t destlen
= get_length(env
, r1
+ 1);
824 uint64_t dest
= get_address(env
, r1
);
825 uint64_t srclen
= get_length(env
, r3
+ 1);
826 uint64_t src
= get_address(env
, r3
);
830 cc
= do_mvcl(env
, &dest
, &destlen
, &src
, &srclen
, pad
, 2, ra
);
832 set_length(env
, r1
+ 1, destlen
);
833 set_length(env
, r3
+ 1, srclen
);
834 set_address(env
, r1
, dest
);
835 set_address(env
, r3
, src
);
840 /* compare logical long helper */
841 static inline uint32_t do_clcl(CPUS390XState
*env
,
842 uint64_t *src1
, uint64_t *src1len
,
843 uint64_t *src3
, uint64_t *src3len
,
844 uint16_t pad
, uint64_t limit
,
845 int wordsize
, uintptr_t ra
)
847 uint64_t len
= MAX(*src1len
, *src3len
);
850 check_alignment(env
, *src1len
| *src3len
, wordsize
, ra
);
856 /* Lest we fail to service interrupts in a timely manner, limit the
857 amount of work we're willing to do. */
863 for (; len
; len
-= wordsize
) {
868 v1
= cpu_ldusize_data_ra(env
, *src1
, wordsize
, ra
);
871 v3
= cpu_ldusize_data_ra(env
, *src3
, wordsize
, ra
);
875 cc
= (v1
< v3
) ? 1 : 2;
881 *src1len
-= wordsize
;
885 *src3len
-= wordsize
;
893 /* compare logical long */
894 uint32_t HELPER(clcl
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
896 uintptr_t ra
= GETPC();
897 uint64_t src1len
= extract64(env
->regs
[r1
+ 1], 0, 24);
898 uint64_t src1
= get_address(env
, r1
);
899 uint64_t src3len
= extract64(env
->regs
[r2
+ 1], 0, 24);
900 uint64_t src3
= get_address(env
, r2
);
901 uint8_t pad
= env
->regs
[r2
+ 1] >> 24;
904 cc
= do_clcl(env
, &src1
, &src1len
, &src3
, &src3len
, pad
, -1, 1, ra
);
906 env
->regs
[r1
+ 1] = deposit64(env
->regs
[r1
+ 1], 0, 24, src1len
);
907 env
->regs
[r2
+ 1] = deposit64(env
->regs
[r2
+ 1], 0, 24, src3len
);
908 set_address(env
, r1
, src1
);
909 set_address(env
, r2
, src3
);
914 /* compare logical long extended memcompare insn with padding */
915 uint32_t HELPER(clcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
918 uintptr_t ra
= GETPC();
919 uint64_t src1len
= get_length(env
, r1
+ 1);
920 uint64_t src1
= get_address(env
, r1
);
921 uint64_t src3len
= get_length(env
, r3
+ 1);
922 uint64_t src3
= get_address(env
, r3
);
926 cc
= do_clcl(env
, &src1
, &src1len
, &src3
, &src3len
, pad
, 0x2000, 1, ra
);
928 set_length(env
, r1
+ 1, src1len
);
929 set_length(env
, r3
+ 1, src3len
);
930 set_address(env
, r1
, src1
);
931 set_address(env
, r3
, src3
);
936 /* compare logical long unicode memcompare insn with padding */
937 uint32_t HELPER(clclu
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
940 uintptr_t ra
= GETPC();
941 uint64_t src1len
= get_length(env
, r1
+ 1);
942 uint64_t src1
= get_address(env
, r1
);
943 uint64_t src3len
= get_length(env
, r3
+ 1);
944 uint64_t src3
= get_address(env
, r3
);
948 cc
= do_clcl(env
, &src1
, &src1len
, &src3
, &src3len
, pad
, 0x1000, 2, ra
);
950 set_length(env
, r1
+ 1, src1len
);
951 set_length(env
, r3
+ 1, src3len
);
952 set_address(env
, r1
, src1
);
953 set_address(env
, r3
, src3
);
959 uint64_t HELPER(cksm
)(CPUS390XState
*env
, uint64_t r1
,
960 uint64_t src
, uint64_t src_len
)
962 uintptr_t ra
= GETPC();
963 uint64_t max_len
, len
;
964 uint64_t cksm
= (uint32_t)r1
;
966 /* Lest we fail to service interrupts in a timely manner, limit the
967 amount of work we're willing to do. For now, let's cap at 8k. */
968 max_len
= (src_len
> 0x2000 ? 0x2000 : src_len
);
970 /* Process full words as available. */
971 for (len
= 0; len
+ 4 <= max_len
; len
+= 4, src
+= 4) {
972 cksm
+= (uint32_t)cpu_ldl_data_ra(env
, src
, ra
);
975 switch (max_len
- len
) {
977 cksm
+= cpu_ldub_data_ra(env
, src
, ra
) << 24;
981 cksm
+= cpu_lduw_data_ra(env
, src
, ra
) << 16;
985 cksm
+= cpu_lduw_data_ra(env
, src
, ra
) << 16;
986 cksm
+= cpu_ldub_data_ra(env
, src
+ 2, ra
) << 8;
991 /* Fold the carry from the checksum. Note that we can see carry-out
992 during folding more than once (but probably not more than twice). */
993 while (cksm
> 0xffffffffull
) {
994 cksm
= (uint32_t)cksm
+ (cksm
>> 32);
997 /* Indicate whether or not we've processed everything. */
998 env
->cc_op
= (len
== src_len
? 0 : 3);
1000 /* Return both cksm and processed length. */
1005 void HELPER(pack
)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
, uint64_t src
)
1007 uintptr_t ra
= GETPC();
1008 int len_dest
= len
>> 4;
1009 int len_src
= len
& 0xf;
1015 /* last byte is special, it only flips the nibbles */
1016 b
= cpu_ldub_data_ra(env
, src
, ra
);
1017 cpu_stb_data_ra(env
, dest
, (b
<< 4) | (b
>> 4), ra
);
1021 /* now pack every value */
1022 while (len_dest
>= 0) {
1026 b
= cpu_ldub_data_ra(env
, src
, ra
) & 0x0f;
1031 b
|= cpu_ldub_data_ra(env
, src
, ra
) << 4;
1038 cpu_stb_data_ra(env
, dest
, b
, ra
);
1042 static inline void do_pkau(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
1043 uint32_t srclen
, int ssize
, uintptr_t ra
)
1046 /* The destination operand is always 16 bytes long. */
1047 const int destlen
= 16;
1049 /* The operands are processed from right to left. */
1051 dest
+= destlen
- 1;
1053 for (i
= 0; i
< destlen
; i
++) {
1056 /* Start with a positive sign */
1059 } else if (srclen
> ssize
) {
1060 b
= cpu_ldub_data_ra(env
, src
, ra
) & 0x0f;
1065 if (srclen
> ssize
) {
1066 b
|= cpu_ldub_data_ra(env
, src
, ra
) << 4;
1071 cpu_stb_data_ra(env
, dest
, b
, ra
);
1077 void HELPER(pka
)(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
1080 do_pkau(env
, dest
, src
, srclen
, 1, GETPC());
1083 void HELPER(pku
)(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
1086 do_pkau(env
, dest
, src
, srclen
, 2, GETPC());
1089 void HELPER(unpk
)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
,
1092 uintptr_t ra
= GETPC();
1093 int len_dest
= len
>> 4;
1094 int len_src
= len
& 0xf;
1096 int second_nibble
= 0;
1101 /* last byte is special, it only flips the nibbles */
1102 b
= cpu_ldub_data_ra(env
, src
, ra
);
1103 cpu_stb_data_ra(env
, dest
, (b
<< 4) | (b
>> 4), ra
);
1107 /* now pad every nibble with 0xf0 */
1109 while (len_dest
> 0) {
1110 uint8_t cur_byte
= 0;
1113 cur_byte
= cpu_ldub_data_ra(env
, src
, ra
);
1119 /* only advance one nibble at a time */
1120 if (second_nibble
) {
1125 second_nibble
= !second_nibble
;
1128 cur_byte
= (cur_byte
& 0xf);
1132 cpu_stb_data_ra(env
, dest
, cur_byte
, ra
);
1136 static inline uint32_t do_unpkau(CPUS390XState
*env
, uint64_t dest
,
1137 uint32_t destlen
, int dsize
, uint64_t src
,
1143 /* The source operand is always 16 bytes long. */
1144 const int srclen
= 16;
1146 /* The operands are processed from right to left. */
1148 dest
+= destlen
- dsize
;
1150 /* Check for the sign. */
1151 b
= cpu_ldub_data_ra(env
, src
, ra
);
1165 cc
= 3; /* invalid */
1169 /* Now pad every nibble with 0x30, advancing one nibble at a time. */
1170 for (i
= 0; i
< destlen
; i
+= dsize
) {
1171 if (i
== (31 * dsize
)) {
1172 /* If length is 32/64 bytes, the leftmost byte is 0. */
1174 } else if (i
% (2 * dsize
)) {
1175 b
= cpu_ldub_data_ra(env
, src
, ra
);
1180 cpu_stsize_data_ra(env
, dest
, 0x30 + (b
& 0xf), dsize
, ra
);
1187 uint32_t HELPER(unpka
)(CPUS390XState
*env
, uint64_t dest
, uint32_t destlen
,
1190 return do_unpkau(env
, dest
, destlen
, 1, src
, GETPC());
1193 uint32_t HELPER(unpku
)(CPUS390XState
*env
, uint64_t dest
, uint32_t destlen
,
1196 return do_unpkau(env
, dest
, destlen
, 2, src
, GETPC());
1199 uint32_t HELPER(tp
)(CPUS390XState
*env
, uint64_t dest
, uint32_t destlen
)
1201 uintptr_t ra
= GETPC();
1205 for (i
= 0; i
< destlen
; i
++) {
1206 uint8_t b
= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
1208 cc
|= (b
& 0xf0) > 0x90 ? 2 : 0;
1210 if (i
== (destlen
- 1)) {
1212 cc
|= (b
& 0xf) < 0xa ? 1 : 0;
1215 cc
|= (b
& 0xf) > 0x9 ? 2 : 0;
1222 static uint32_t do_helper_tr(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1223 uint64_t trans
, uintptr_t ra
)
1227 for (i
= 0; i
<= len
; i
++) {
1228 uint8_t byte
= cpu_ldub_data_ra(env
, array
+ i
, ra
);
1229 uint8_t new_byte
= cpu_ldub_data_ra(env
, trans
+ byte
, ra
);
1230 cpu_stb_data_ra(env
, array
+ i
, new_byte
, ra
);
1236 void HELPER(tr
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1239 do_helper_tr(env
, len
, array
, trans
, GETPC());
1242 uint64_t HELPER(tre
)(CPUS390XState
*env
, uint64_t array
,
1243 uint64_t len
, uint64_t trans
)
1245 uintptr_t ra
= GETPC();
1246 uint8_t end
= env
->regs
[0] & 0xff;
1251 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
1252 array
&= 0x7fffffff;
1256 /* Lest we fail to service interrupts in a timely manner, limit the
1257 amount of work we're willing to do. For now, let's cap at 8k. */
1263 for (i
= 0; i
< l
; i
++) {
1264 uint8_t byte
, new_byte
;
1266 byte
= cpu_ldub_data_ra(env
, array
+ i
, ra
);
1273 new_byte
= cpu_ldub_data_ra(env
, trans
+ byte
, ra
);
1274 cpu_stb_data_ra(env
, array
+ i
, new_byte
, ra
);
1278 env
->retxl
= len
- i
;
1282 static inline uint32_t do_helper_trt(CPUS390XState
*env
, int len
,
1283 uint64_t array
, uint64_t trans
,
1284 int inc
, uintptr_t ra
)
1288 for (i
= 0; i
<= len
; i
++) {
1289 uint8_t byte
= cpu_ldub_data_ra(env
, array
+ i
* inc
, ra
);
1290 uint8_t sbyte
= cpu_ldub_data_ra(env
, trans
+ byte
, ra
);
1293 set_address(env
, 1, array
+ i
* inc
);
1294 env
->regs
[2] = deposit64(env
->regs
[2], 0, 8, sbyte
);
1295 return (i
== len
) ? 2 : 1;
1302 static uint32_t do_helper_trt_fwd(CPUS390XState
*env
, uint32_t len
,
1303 uint64_t array
, uint64_t trans
,
1306 return do_helper_trt(env
, len
, array
, trans
, 1, ra
);
1309 uint32_t HELPER(trt
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1312 return do_helper_trt(env
, len
, array
, trans
, 1, GETPC());
1315 static uint32_t do_helper_trt_bkwd(CPUS390XState
*env
, uint32_t len
,
1316 uint64_t array
, uint64_t trans
,
1319 return do_helper_trt(env
, len
, array
, trans
, -1, ra
);
1322 uint32_t HELPER(trtr
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1325 return do_helper_trt(env
, len
, array
, trans
, -1, GETPC());
1328 /* Translate one/two to one/two */
1329 uint32_t HELPER(trXX
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
,
1330 uint32_t tst
, uint32_t sizes
)
1332 uintptr_t ra
= GETPC();
1333 int dsize
= (sizes
& 1) ? 1 : 2;
1334 int ssize
= (sizes
& 2) ? 1 : 2;
1335 uint64_t tbl
= get_address(env
, 1);
1336 uint64_t dst
= get_address(env
, r1
);
1337 uint64_t len
= get_length(env
, r1
+ 1);
1338 uint64_t src
= get_address(env
, r2
);
1342 /* The lower address bits of TBL are ignored. For TROO, TROT, it's
1343 the low 3 bits (double-word aligned). For TRTO, TRTT, it's either
1344 the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */
1345 if (ssize
== 2 && !s390_has_feat(S390_FEAT_ETF2_ENH
)) {
1351 check_alignment(env
, len
, ssize
, ra
);
1353 /* Lest we fail to service interrupts in a timely manner, */
1354 /* limit the amount of work we're willing to do. */
1355 for (i
= 0; i
< 0x2000; i
++) {
1356 uint16_t sval
= cpu_ldusize_data_ra(env
, src
, ssize
, ra
);
1357 uint64_t tble
= tbl
+ (sval
* dsize
);
1358 uint16_t dval
= cpu_ldusize_data_ra(env
, tble
, dsize
, ra
);
1363 cpu_stsize_data_ra(env
, dst
, dval
, dsize
, ra
);
1375 set_address(env
, r1
, dst
);
1376 set_length(env
, r1
+ 1, len
);
1377 set_address(env
, r2
, src
);
1382 static void do_cdsg(CPUS390XState
*env
, uint64_t addr
,
1383 uint32_t r1
, uint32_t r3
, bool parallel
)
1385 uintptr_t ra
= GETPC();
1386 Int128 cmpv
= int128_make128(env
->regs
[r1
+ 1], env
->regs
[r1
]);
1387 Int128 newv
= int128_make128(env
->regs
[r3
+ 1], env
->regs
[r3
]);
1392 #ifndef CONFIG_ATOMIC128
1393 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
1395 int mem_idx
= cpu_mmu_index(env
, false);
1396 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
1397 oldv
= helper_atomic_cmpxchgo_be_mmu(env
, addr
, cmpv
, newv
, oi
, ra
);
1398 fail
= !int128_eq(oldv
, cmpv
);
1401 uint64_t oldh
, oldl
;
1403 check_alignment(env
, addr
, 16, ra
);
1405 oldh
= cpu_ldq_data_ra(env
, addr
+ 0, ra
);
1406 oldl
= cpu_ldq_data_ra(env
, addr
+ 8, ra
);
1408 oldv
= int128_make128(oldl
, oldh
);
1409 fail
= !int128_eq(oldv
, cmpv
);
1414 cpu_stq_data_ra(env
, addr
+ 0, int128_gethi(newv
), ra
);
1415 cpu_stq_data_ra(env
, addr
+ 8, int128_getlo(newv
), ra
);
1419 env
->regs
[r1
] = int128_gethi(oldv
);
1420 env
->regs
[r1
+ 1] = int128_getlo(oldv
);
1423 void HELPER(cdsg
)(CPUS390XState
*env
, uint64_t addr
,
1424 uint32_t r1
, uint32_t r3
)
1426 do_cdsg(env
, addr
, r1
, r3
, false);
1429 void HELPER(cdsg_parallel
)(CPUS390XState
*env
, uint64_t addr
,
1430 uint32_t r1
, uint32_t r3
)
1432 do_cdsg(env
, addr
, r1
, r3
, true);
1435 static uint32_t do_csst(CPUS390XState
*env
, uint32_t r3
, uint64_t a1
,
1436 uint64_t a2
, bool parallel
)
1438 #if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128)
1439 uint32_t mem_idx
= cpu_mmu_index(env
, false);
1441 uintptr_t ra
= GETPC();
1442 uint32_t fc
= extract32(env
->regs
[0], 0, 8);
1443 uint32_t sc
= extract32(env
->regs
[0], 8, 8);
1444 uint64_t pl
= get_address(env
, 1) & -16;
1448 /* Sanity check the function code and storage characteristic. */
1449 if (fc
> 1 || sc
> 3) {
1450 if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2
)) {
1451 goto spec_exception
;
1453 if (fc
> 2 || sc
> 4 || (fc
== 2 && (r3
& 1))) {
1454 goto spec_exception
;
1458 /* Sanity check the alignments. */
1459 if (extract32(a1
, 0, fc
+ 2) || extract32(a2
, 0, sc
)) {
1460 goto spec_exception
;
1463 /* Sanity check writability of the store address. */
1464 #ifndef CONFIG_USER_ONLY
1465 probe_write(env
, a2
, 0, mem_idx
, ra
);
1468 /* Note that the compare-and-swap is atomic, and the store is atomic, but
1469 the complete operation is not. Therefore we do not need to assert serial
1470 context in order to implement this. That said, restart early if we can't
1471 support either operation that is supposed to be atomic. */
1474 #if !defined(CONFIG_ATOMIC64)
1476 #elif !defined(CONFIG_ATOMIC128)
1479 if (((4 << fc
) | (1 << sc
)) & mask
) {
1480 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
1484 /* All loads happen before all stores. For simplicity, load the entire
1485 store value area from the parameter list. */
1486 svh
= cpu_ldq_data_ra(env
, pl
+ 16, ra
);
1487 svl
= cpu_ldq_data_ra(env
, pl
+ 24, ra
);
1492 uint32_t nv
= cpu_ldl_data_ra(env
, pl
, ra
);
1493 uint32_t cv
= env
->regs
[r3
];
1497 #ifdef CONFIG_USER_ONLY
1498 uint32_t *haddr
= g2h(a1
);
1499 ov
= atomic_cmpxchg__nocheck(haddr
, cv
, nv
);
1501 TCGMemOpIdx oi
= make_memop_idx(MO_TEUL
| MO_ALIGN
, mem_idx
);
1502 ov
= helper_atomic_cmpxchgl_be_mmu(env
, a1
, cv
, nv
, oi
, ra
);
1505 ov
= cpu_ldl_data_ra(env
, a1
, ra
);
1506 cpu_stl_data_ra(env
, a1
, (ov
== cv
? nv
: ov
), ra
);
1509 env
->regs
[r3
] = deposit64(env
->regs
[r3
], 32, 32, ov
);
1515 uint64_t nv
= cpu_ldq_data_ra(env
, pl
, ra
);
1516 uint64_t cv
= env
->regs
[r3
];
1520 #ifdef CONFIG_ATOMIC64
1521 # ifdef CONFIG_USER_ONLY
1522 uint64_t *haddr
= g2h(a1
);
1523 ov
= atomic_cmpxchg__nocheck(haddr
, cv
, nv
);
1525 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN
, mem_idx
);
1526 ov
= helper_atomic_cmpxchgq_be_mmu(env
, a1
, cv
, nv
, oi
, ra
);
1529 /* Note that we asserted !parallel above. */
1530 g_assert_not_reached();
1533 ov
= cpu_ldq_data_ra(env
, a1
, ra
);
1534 cpu_stq_data_ra(env
, a1
, (ov
== cv
? nv
: ov
), ra
);
1543 uint64_t nvh
= cpu_ldq_data_ra(env
, pl
, ra
);
1544 uint64_t nvl
= cpu_ldq_data_ra(env
, pl
+ 8, ra
);
1545 Int128 nv
= int128_make128(nvl
, nvh
);
1546 Int128 cv
= int128_make128(env
->regs
[r3
+ 1], env
->regs
[r3
]);
1550 #ifdef CONFIG_ATOMIC128
1551 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
1552 ov
= helper_atomic_cmpxchgo_be_mmu(env
, a1
, cv
, nv
, oi
, ra
);
1553 cc
= !int128_eq(ov
, cv
);
1555 /* Note that we asserted !parallel above. */
1556 g_assert_not_reached();
1559 uint64_t oh
= cpu_ldq_data_ra(env
, a1
+ 0, ra
);
1560 uint64_t ol
= cpu_ldq_data_ra(env
, a1
+ 8, ra
);
1562 ov
= int128_make128(ol
, oh
);
1563 cc
= !int128_eq(ov
, cv
);
1568 cpu_stq_data_ra(env
, a1
+ 0, int128_gethi(nv
), ra
);
1569 cpu_stq_data_ra(env
, a1
+ 8, int128_getlo(nv
), ra
);
1572 env
->regs
[r3
+ 0] = int128_gethi(ov
);
1573 env
->regs
[r3
+ 1] = int128_getlo(ov
);
1578 g_assert_not_reached();
1581 /* Store only if the comparison succeeded. Note that above we use a pair
1582 of 64-bit big-endian loads, so for sc < 3 we must extract the value
1583 from the most-significant bits of svh. */
1587 cpu_stb_data_ra(env
, a2
, svh
>> 56, ra
);
1590 cpu_stw_data_ra(env
, a2
, svh
>> 48, ra
);
1593 cpu_stl_data_ra(env
, a2
, svh
>> 32, ra
);
1596 cpu_stq_data_ra(env
, a2
, svh
, ra
);
1600 #ifdef CONFIG_ATOMIC128
1601 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
1602 Int128 sv
= int128_make128(svl
, svh
);
1603 helper_atomic_sto_be_mmu(env
, a2
, sv
, oi
, ra
);
1605 /* Note that we asserted !parallel above. */
1606 g_assert_not_reached();
1609 cpu_stq_data_ra(env
, a2
+ 0, svh
, ra
);
1610 cpu_stq_data_ra(env
, a2
+ 8, svl
, ra
);
1614 g_assert_not_reached();
1621 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
1622 g_assert_not_reached();
1625 uint32_t HELPER(csst
)(CPUS390XState
*env
, uint32_t r3
, uint64_t a1
, uint64_t a2
)
1627 return do_csst(env
, r3
, a1
, a2
, false);
1630 uint32_t HELPER(csst_parallel
)(CPUS390XState
*env
, uint32_t r3
, uint64_t a1
,
1633 return do_csst(env
, r3
, a1
, a2
, true);
1636 #if !defined(CONFIG_USER_ONLY)
1637 void HELPER(lctlg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1639 uintptr_t ra
= GETPC();
1640 S390CPU
*cpu
= s390_env_get_cpu(env
);
1641 bool PERchanged
= false;
1646 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
1649 for (i
= r1
;; i
= (i
+ 1) % 16) {
1650 uint64_t val
= cpu_ldq_data_ra(env
, src
, ra
);
1651 if (env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
1654 env
->cregs
[i
] = val
;
1655 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
1657 src
+= sizeof(uint64_t);
1664 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
1665 s390_cpu_recompute_watchpoints(CPU(cpu
));
1668 tlb_flush(CPU(cpu
));
1671 void HELPER(lctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1673 uintptr_t ra
= GETPC();
1674 S390CPU
*cpu
= s390_env_get_cpu(env
);
1675 bool PERchanged
= false;
1680 s390_program_interrupt(env
, PGM_SPECIFICATION
, 4, ra
);
1683 for (i
= r1
;; i
= (i
+ 1) % 16) {
1684 uint32_t val
= cpu_ldl_data_ra(env
, src
, ra
);
1685 if ((uint32_t)env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
1688 env
->cregs
[i
] = deposit64(env
->cregs
[i
], 0, 32, val
);
1689 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%x\n", i
, src
, val
);
1690 src
+= sizeof(uint32_t);
1697 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
1698 s390_cpu_recompute_watchpoints(CPU(cpu
));
1701 tlb_flush(CPU(cpu
));
1704 void HELPER(stctg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1706 uintptr_t ra
= GETPC();
1711 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
1714 for (i
= r1
;; i
= (i
+ 1) % 16) {
1715 cpu_stq_data_ra(env
, dest
, env
->cregs
[i
], ra
);
1716 dest
+= sizeof(uint64_t);
1724 void HELPER(stctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1726 uintptr_t ra
= GETPC();
1731 s390_program_interrupt(env
, PGM_SPECIFICATION
, 4, ra
);
1734 for (i
= r1
;; i
= (i
+ 1) % 16) {
1735 cpu_stl_data_ra(env
, dest
, env
->cregs
[i
], ra
);
1736 dest
+= sizeof(uint32_t);
1744 uint32_t HELPER(testblock
)(CPUS390XState
*env
, uint64_t real_addr
)
1746 uintptr_t ra
= GETPC();
1749 real_addr
= wrap_address(env
, real_addr
) & TARGET_PAGE_MASK
;
1751 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
+= 8) {
1752 cpu_stq_real_ra(env
, real_addr
+ i
, 0, ra
);
1758 uint32_t HELPER(tprot
)(CPUS390XState
*env
, uint64_t a1
, uint64_t a2
)
1760 S390CPU
*cpu
= s390_env_get_cpu(env
);
1761 CPUState
*cs
= CPU(cpu
);
1764 * TODO: we currently don't handle all access protection types
1765 * (including access-list and key-controlled) as well as AR mode.
1767 if (!s390_cpu_virt_mem_check_write(cpu
, a1
, 0, 1)) {
1768 /* Fetching permitted; storing permitted */
1772 if (env
->int_pgm_code
== PGM_PROTECTION
) {
1773 /* retry if reading is possible */
1774 cs
->exception_index
= 0;
1775 if (!s390_cpu_virt_mem_check_read(cpu
, a1
, 0, 1)) {
1776 /* Fetching permitted; storing not permitted */
1781 switch (env
->int_pgm_code
) {
1782 case PGM_PROTECTION
:
1783 /* Fetching not permitted; storing not permitted */
1784 cs
->exception_index
= 0;
1786 case PGM_ADDRESSING
:
1787 case PGM_TRANS_SPEC
:
1788 /* exceptions forwarded to the guest */
1789 s390_cpu_virt_mem_handle_exc(cpu
, GETPC());
1793 /* Translation not available */
1794 cs
->exception_index
= 0;
1798 /* insert storage key extended */
1799 uint64_t HELPER(iske
)(CPUS390XState
*env
, uint64_t r2
)
1801 static S390SKeysState
*ss
;
1802 static S390SKeysClass
*skeyclass
;
1803 uint64_t addr
= wrap_address(env
, r2
);
1806 if (addr
> ram_size
) {
1810 if (unlikely(!ss
)) {
1811 ss
= s390_get_skeys_device();
1812 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1815 if (skeyclass
->get_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1821 /* set storage key extended */
1822 void HELPER(sske
)(CPUS390XState
*env
, uint64_t r1
, uint64_t r2
)
1824 static S390SKeysState
*ss
;
1825 static S390SKeysClass
*skeyclass
;
1826 uint64_t addr
= wrap_address(env
, r2
);
1829 if (addr
> ram_size
) {
1833 if (unlikely(!ss
)) {
1834 ss
= s390_get_skeys_device();
1835 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1839 skeyclass
->set_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
);
1842 /* reset reference bit extended */
1843 uint32_t HELPER(rrbe
)(CPUS390XState
*env
, uint64_t r2
)
1845 static S390SKeysState
*ss
;
1846 static S390SKeysClass
*skeyclass
;
1849 if (r2
> ram_size
) {
1853 if (unlikely(!ss
)) {
1854 ss
= s390_get_skeys_device();
1855 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1858 if (skeyclass
->get_skeys(ss
, r2
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1862 re
= key
& (SK_R
| SK_C
);
1865 if (skeyclass
->set_skeys(ss
, r2
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1872 * 0 Reference bit zero; change bit zero
1873 * 1 Reference bit zero; change bit one
1874 * 2 Reference bit one; change bit zero
1875 * 3 Reference bit one; change bit one
1881 uint32_t HELPER(mvcs
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1883 uintptr_t ra
= GETPC();
1886 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1887 __func__
, l
, a1
, a2
);
1895 /* XXX replace w/ memcpy */
1896 for (i
= 0; i
< l
; i
++) {
1897 uint8_t x
= cpu_ldub_primary_ra(env
, a2
+ i
, ra
);
1898 cpu_stb_secondary_ra(env
, a1
+ i
, x
, ra
);
1904 uint32_t HELPER(mvcp
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1906 uintptr_t ra
= GETPC();
1909 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1910 __func__
, l
, a1
, a2
);
1918 /* XXX replace w/ memcpy */
1919 for (i
= 0; i
< l
; i
++) {
1920 uint8_t x
= cpu_ldub_secondary_ra(env
, a2
+ i
, ra
);
1921 cpu_stb_primary_ra(env
, a1
+ i
, x
, ra
);
1927 void HELPER(idte
)(CPUS390XState
*env
, uint64_t r1
, uint64_t r2
, uint32_t m4
)
1929 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1930 const uintptr_t ra
= GETPC();
1931 uint64_t table
, entry
, raddr
;
1932 uint16_t entries
, i
, index
= 0;
1935 s390_program_interrupt(env
, PGM_SPECIFICATION
, 4, ra
);
1938 if (!(r2
& 0x800)) {
1939 /* invalidation-and-clearing operation */
1940 table
= r1
& ASCE_ORIGIN
;
1941 entries
= (r2
& 0x7ff) + 1;
1943 switch (r1
& ASCE_TYPE_MASK
) {
1944 case ASCE_TYPE_REGION1
:
1945 index
= (r2
>> 53) & 0x7ff;
1947 case ASCE_TYPE_REGION2
:
1948 index
= (r2
>> 42) & 0x7ff;
1950 case ASCE_TYPE_REGION3
:
1951 index
= (r2
>> 31) & 0x7ff;
1953 case ASCE_TYPE_SEGMENT
:
1954 index
= (r2
>> 20) & 0x7ff;
1957 for (i
= 0; i
< entries
; i
++) {
1958 /* addresses are not wrapped in 24/31bit mode but table index is */
1959 raddr
= table
+ ((index
+ i
) & 0x7ff) * sizeof(entry
);
1960 entry
= cpu_ldq_real_ra(env
, raddr
, ra
);
1961 if (!(entry
& REGION_ENTRY_INV
)) {
1962 /* we are allowed to not store if already invalid */
1963 entry
|= REGION_ENTRY_INV
;
1964 cpu_stq_real_ra(env
, raddr
, entry
, ra
);
1969 /* We simply flush the complete tlb, therefore we can ignore r3. */
1973 tlb_flush_all_cpus_synced(cs
);
1977 /* invalidate pte */
1978 void HELPER(ipte
)(CPUS390XState
*env
, uint64_t pto
, uint64_t vaddr
,
1981 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1982 const uintptr_t ra
= GETPC();
1983 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
1984 uint64_t pte_addr
, pte
;
1986 /* Compute the page table entry address */
1987 pte_addr
= (pto
& SEGMENT_ENTRY_ORIGIN
);
1988 pte_addr
+= (vaddr
& VADDR_PX
) >> 9;
1990 /* Mark the page table entry as invalid */
1991 pte
= cpu_ldq_real_ra(env
, pte_addr
, ra
);
1992 pte
|= PAGE_INVALID
;
1993 cpu_stq_real_ra(env
, pte_addr
, pte
, ra
);
1995 /* XXX we exploit the fact that Linux passes the exact virtual
1996 address here - it's not obliged to! */
1998 if (vaddr
& ~VADDR_PX
) {
1999 tlb_flush_page(cs
, page
);
2000 /* XXX 31-bit hack */
2001 tlb_flush_page(cs
, page
^ 0x80000000);
2003 /* looks like we don't have a valid virtual address */
2007 if (vaddr
& ~VADDR_PX
) {
2008 tlb_flush_page_all_cpus_synced(cs
, page
);
2009 /* XXX 31-bit hack */
2010 tlb_flush_page_all_cpus_synced(cs
, page
^ 0x80000000);
2012 /* looks like we don't have a valid virtual address */
2013 tlb_flush_all_cpus_synced(cs
);
2018 /* flush local tlb */
2019 void HELPER(ptlb
)(CPUS390XState
*env
)
2021 S390CPU
*cpu
= s390_env_get_cpu(env
);
2023 tlb_flush(CPU(cpu
));
2026 /* flush global tlb */
2027 void HELPER(purge
)(CPUS390XState
*env
)
2029 S390CPU
*cpu
= s390_env_get_cpu(env
);
2031 tlb_flush_all_cpus_synced(CPU(cpu
));
2034 /* load using real address */
2035 uint64_t HELPER(lura
)(CPUS390XState
*env
, uint64_t addr
)
2037 return cpu_ldl_real_ra(env
, wrap_address(env
, addr
), GETPC());
2040 uint64_t HELPER(lurag
)(CPUS390XState
*env
, uint64_t addr
)
2042 return cpu_ldq_real_ra(env
, wrap_address(env
, addr
), GETPC());
2045 /* store using real address */
2046 void HELPER(stura
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
2048 cpu_stl_real_ra(env
, wrap_address(env
, addr
), (uint32_t)v1
, GETPC());
2050 if ((env
->psw
.mask
& PSW_MASK_PER
) &&
2051 (env
->cregs
[9] & PER_CR9_EVENT_STORE
) &&
2052 (env
->cregs
[9] & PER_CR9_EVENT_STORE_REAL
)) {
2053 /* PSW is saved just before calling the helper. */
2054 env
->per_address
= env
->psw
.addr
;
2055 env
->per_perc_atmid
= PER_CODE_EVENT_STORE_REAL
| get_per_atmid(env
);
2059 void HELPER(sturg
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
2061 cpu_stq_real_ra(env
, wrap_address(env
, addr
), v1
, GETPC());
2063 if ((env
->psw
.mask
& PSW_MASK_PER
) &&
2064 (env
->cregs
[9] & PER_CR9_EVENT_STORE
) &&
2065 (env
->cregs
[9] & PER_CR9_EVENT_STORE_REAL
)) {
2066 /* PSW is saved just before calling the helper. */
2067 env
->per_address
= env
->psw
.addr
;
2068 env
->per_perc_atmid
= PER_CODE_EVENT_STORE_REAL
| get_per_atmid(env
);
2072 /* load real address */
2073 uint64_t HELPER(lra
)(CPUS390XState
*env
, uint64_t addr
)
2075 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
2077 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
2081 /* XXX incomplete - has more corner cases */
2082 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
2083 s390_program_interrupt(env
, PGM_SPECIAL_OP
, 2, GETPC());
2086 old_exc
= cs
->exception_index
;
2087 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
, true)) {
2090 if (cs
->exception_index
== EXCP_PGM
) {
2091 ret
= env
->int_pgm_code
| 0x80000000;
2093 ret
|= addr
& ~TARGET_PAGE_MASK
;
2095 cs
->exception_index
= old_exc
;
2102 /* load pair from quadword */
2103 static uint64_t do_lpq(CPUS390XState
*env
, uint64_t addr
, bool parallel
)
2105 uintptr_t ra
= GETPC();
2109 #ifndef CONFIG_ATOMIC128
2110 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
2112 int mem_idx
= cpu_mmu_index(env
, false);
2113 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
2114 Int128 v
= helper_atomic_ldo_be_mmu(env
, addr
, oi
, ra
);
2115 hi
= int128_gethi(v
);
2116 lo
= int128_getlo(v
);
2119 check_alignment(env
, addr
, 16, ra
);
2121 hi
= cpu_ldq_data_ra(env
, addr
+ 0, ra
);
2122 lo
= cpu_ldq_data_ra(env
, addr
+ 8, ra
);
2129 uint64_t HELPER(lpq
)(CPUS390XState
*env
, uint64_t addr
)
2131 return do_lpq(env
, addr
, false);
2134 uint64_t HELPER(lpq_parallel
)(CPUS390XState
*env
, uint64_t addr
)
2136 return do_lpq(env
, addr
, true);
2139 /* store pair to quadword */
2140 static void do_stpq(CPUS390XState
*env
, uint64_t addr
,
2141 uint64_t low
, uint64_t high
, bool parallel
)
2143 uintptr_t ra
= GETPC();
2146 #ifndef CONFIG_ATOMIC128
2147 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
2149 int mem_idx
= cpu_mmu_index(env
, false);
2150 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
2152 Int128 v
= int128_make128(low
, high
);
2153 helper_atomic_sto_be_mmu(env
, addr
, v
, oi
, ra
);
2156 check_alignment(env
, addr
, 16, ra
);
2158 cpu_stq_data_ra(env
, addr
+ 0, high
, ra
);
2159 cpu_stq_data_ra(env
, addr
+ 8, low
, ra
);
2163 void HELPER(stpq
)(CPUS390XState
*env
, uint64_t addr
,
2164 uint64_t low
, uint64_t high
)
2166 do_stpq(env
, addr
, low
, high
, false);
2169 void HELPER(stpq_parallel
)(CPUS390XState
*env
, uint64_t addr
,
2170 uint64_t low
, uint64_t high
)
2172 do_stpq(env
, addr
, low
, high
, true);
2175 /* Execute instruction. This instruction executes an insn modified with
2176 the contents of r1. It does not change the executed instruction in memory;
2177 it does not change the program counter.
2179 Perform this by recording the modified instruction in env->ex_value.
2180 This will be noticed by cpu_get_tb_cpu_state and thus tb translation.
2182 void HELPER(ex
)(CPUS390XState
*env
, uint32_t ilen
, uint64_t r1
, uint64_t addr
)
2184 uint64_t insn
= cpu_lduw_code(env
, addr
);
2185 uint8_t opc
= insn
>> 8;
2187 /* Or in the contents of R1[56:63]. */
2190 /* Load the rest of the instruction. */
2192 switch (get_ilen(opc
)) {
2196 insn
|= (uint64_t)cpu_lduw_code(env
, addr
+ 2) << 32;
2199 insn
|= (uint64_t)(uint32_t)cpu_ldl_code(env
, addr
+ 2) << 16;
2202 g_assert_not_reached();
2205 /* The very most common cases can be sped up by avoiding a new TB. */
2206 if ((opc
& 0xf0) == 0xd0) {
2207 typedef uint32_t (*dx_helper
)(CPUS390XState
*, uint32_t, uint64_t,
2208 uint64_t, uintptr_t);
2209 static const dx_helper dx
[16] = {
2210 [0x0] = do_helper_trt_bkwd
,
2211 [0x2] = do_helper_mvc
,
2212 [0x4] = do_helper_nc
,
2213 [0x5] = do_helper_clc
,
2214 [0x6] = do_helper_oc
,
2215 [0x7] = do_helper_xc
,
2216 [0xc] = do_helper_tr
,
2217 [0xd] = do_helper_trt_fwd
,
2219 dx_helper helper
= dx
[opc
& 0xf];
2222 uint32_t l
= extract64(insn
, 48, 8);
2223 uint32_t b1
= extract64(insn
, 44, 4);
2224 uint32_t d1
= extract64(insn
, 32, 12);
2225 uint32_t b2
= extract64(insn
, 28, 4);
2226 uint32_t d2
= extract64(insn
, 16, 12);
2227 uint64_t a1
= wrap_address(env
, env
->regs
[b1
] + d1
);
2228 uint64_t a2
= wrap_address(env
, env
->regs
[b2
] + d2
);
2230 env
->cc_op
= helper(env
, l
, a1
, a2
, 0);
2231 env
->psw
.addr
+= ilen
;
2234 } else if (opc
== 0x0a) {
2235 env
->int_svc_code
= extract64(insn
, 48, 8);
2236 env
->int_svc_ilen
= ilen
;
2237 helper_exception(env
, EXCP_SVC
);
2238 g_assert_not_reached();
2241 /* Record the insn we want to execute as well as the ilen to use
2242 during the execution of the target insn. This will also ensure
2243 that ex_value is non-zero, which flags that we are in a state
2244 that requires such execution. */
2245 env
->ex_value
= insn
| ilen
;
2248 uint32_t HELPER(mvcos
)(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
2251 const uint8_t psw_key
= (env
->psw
.mask
& PSW_MASK_KEY
) >> PSW_SHIFT_KEY
;
2252 const uint8_t psw_as
= (env
->psw
.mask
& PSW_MASK_ASC
) >> PSW_SHIFT_ASC
;
2253 const uint64_t r0
= env
->regs
[0];
2254 const uintptr_t ra
= GETPC();
2255 uint8_t dest_key
, dest_as
, dest_k
, dest_a
;
2256 uint8_t src_key
, src_as
, src_k
, src_a
;
2260 HELPER_LOG("%s dest %" PRIx64
", src %" PRIx64
", len %" PRIx64
"\n",
2261 __func__
, dest
, src
, len
);
2263 if (!(env
->psw
.mask
& PSW_MASK_DAT
)) {
2264 s390_program_interrupt(env
, PGM_SPECIAL_OP
, 6, ra
);
2267 /* OAC (operand access control) for the first operand -> dest */
2268 val
= (r0
& 0xffff0000ULL
) >> 16;
2269 dest_key
= (val
>> 12) & 0xf;
2270 dest_as
= (val
>> 6) & 0x3;
2271 dest_k
= (val
>> 1) & 0x1;
2274 /* OAC (operand access control) for the second operand -> src */
2275 val
= (r0
& 0x0000ffffULL
);
2276 src_key
= (val
>> 12) & 0xf;
2277 src_as
= (val
>> 6) & 0x3;
2278 src_k
= (val
>> 1) & 0x1;
2294 if (dest_a
&& dest_as
== AS_HOME
&& (env
->psw
.mask
& PSW_MASK_PSTATE
)) {
2295 s390_program_interrupt(env
, PGM_SPECIAL_OP
, 6, ra
);
2297 if (!(env
->cregs
[0] & CR0_SECONDARY
) &&
2298 (dest_as
== AS_SECONDARY
|| src_as
== AS_SECONDARY
)) {
2299 s390_program_interrupt(env
, PGM_SPECIAL_OP
, 6, ra
);
2301 if (!psw_key_valid(env
, dest_key
) || !psw_key_valid(env
, src_key
)) {
2302 s390_program_interrupt(env
, PGM_PRIVILEGED
, 6, ra
);
2305 len
= wrap_length(env
, len
);
2311 /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
2312 if (src_as
== AS_ACCREG
|| dest_as
== AS_ACCREG
||
2313 (env
->psw
.mask
& PSW_MASK_PSTATE
)) {
2314 qemu_log_mask(LOG_UNIMP
, "%s: AR-mode and PSTATE support missing\n",
2316 s390_program_interrupt(env
, PGM_ADDRESSING
, 6, ra
);
2320 * b) Access using correct keys
2323 #ifdef CONFIG_USER_ONLY
2324 /* psw keys are never valid in user mode, we will never reach this */
2325 g_assert_not_reached();
2327 fast_memmove_as(env
, dest
, src
, len
, dest_as
, src_as
, ra
);
2333 /* Decode a Unicode character. A return value < 0 indicates success, storing
2334 the UTF-32 result into OCHAR and the input length into OLEN. A return
2335 value >= 0 indicates failure, and the CC value to be returned. */
2336 typedef int (*decode_unicode_fn
)(CPUS390XState
*env
, uint64_t addr
,
2337 uint64_t ilen
, bool enh_check
, uintptr_t ra
,
2338 uint32_t *ochar
, uint32_t *olen
);
2340 /* Encode a Unicode character. A return value < 0 indicates success, storing
2341 the bytes into ADDR and the output length into OLEN. A return value >= 0
2342 indicates failure, and the CC value to be returned. */
2343 typedef int (*encode_unicode_fn
)(CPUS390XState
*env
, uint64_t addr
,
2344 uint64_t ilen
, uintptr_t ra
, uint32_t c
,
2347 static int decode_utf8(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2348 bool enh_check
, uintptr_t ra
,
2349 uint32_t *ochar
, uint32_t *olen
)
2351 uint8_t s0
, s1
, s2
, s3
;
2357 s0
= cpu_ldub_data_ra(env
, addr
, ra
);
2359 /* one byte character */
2362 } else if (s0
<= (enh_check
? 0xc1 : 0xbf)) {
2363 /* invalid character */
2365 } else if (s0
<= 0xdf) {
2366 /* two byte character */
2371 s1
= cpu_ldub_data_ra(env
, addr
+ 1, ra
);
2373 c
= (c
<< 6) | (s1
& 0x3f);
2374 if (enh_check
&& (s1
& 0xc0) != 0x80) {
2377 } else if (s0
<= 0xef) {
2378 /* three byte character */
2383 s1
= cpu_ldub_data_ra(env
, addr
+ 1, ra
);
2384 s2
= cpu_ldub_data_ra(env
, addr
+ 2, ra
);
2386 c
= (c
<< 6) | (s1
& 0x3f);
2387 c
= (c
<< 6) | (s2
& 0x3f);
2388 /* Fold the byte-by-byte range descriptions in the PoO into
2389 tests against the complete value. It disallows encodings
2390 that could be smaller, and the UTF-16 surrogates. */
2392 && ((s1
& 0xc0) != 0x80
2393 || (s2
& 0xc0) != 0x80
2395 || (c
>= 0xd800 && c
<= 0xdfff))) {
2398 } else if (s0
<= (enh_check
? 0xf4 : 0xf7)) {
2399 /* four byte character */
2404 s1
= cpu_ldub_data_ra(env
, addr
+ 1, ra
);
2405 s2
= cpu_ldub_data_ra(env
, addr
+ 2, ra
);
2406 s3
= cpu_ldub_data_ra(env
, addr
+ 3, ra
);
2408 c
= (c
<< 6) | (s1
& 0x3f);
2409 c
= (c
<< 6) | (s2
& 0x3f);
2410 c
= (c
<< 6) | (s3
& 0x3f);
2413 && ((s1
& 0xc0) != 0x80
2414 || (s2
& 0xc0) != 0x80
2415 || (s3
& 0xc0) != 0x80
2421 /* invalid character */
2430 static int decode_utf16(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2431 bool enh_check
, uintptr_t ra
,
2432 uint32_t *ochar
, uint32_t *olen
)
2440 s0
= cpu_lduw_data_ra(env
, addr
, ra
);
2441 if ((s0
& 0xfc00) != 0xd800) {
2442 /* one word character */
2446 /* two word character */
2451 s1
= cpu_lduw_data_ra(env
, addr
+ 2, ra
);
2452 c
= extract32(s0
, 6, 4) + 1;
2453 c
= (c
<< 6) | (s0
& 0x3f);
2454 c
= (c
<< 10) | (s1
& 0x3ff);
2455 if (enh_check
&& (s1
& 0xfc00) != 0xdc00) {
2456 /* invalid surrogate character */
2466 static int decode_utf32(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2467 bool enh_check
, uintptr_t ra
,
2468 uint32_t *ochar
, uint32_t *olen
)
2475 c
= cpu_ldl_data_ra(env
, addr
, ra
);
2476 if ((c
>= 0xd800 && c
<= 0xdbff) || c
> 0x10ffff) {
2477 /* invalid unicode character */
2486 static int encode_utf8(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2487 uintptr_t ra
, uint32_t c
, uint32_t *olen
)
2493 /* one byte character */
2496 } else if (c
<= 0x7ff) {
2497 /* two byte character */
2499 d
[1] = 0x80 | extract32(c
, 0, 6);
2500 d
[0] = 0xc0 | extract32(c
, 6, 5);
2501 } else if (c
<= 0xffff) {
2502 /* three byte character */
2504 d
[2] = 0x80 | extract32(c
, 0, 6);
2505 d
[1] = 0x80 | extract32(c
, 6, 6);
2506 d
[0] = 0xe0 | extract32(c
, 12, 4);
2508 /* four byte character */
2510 d
[3] = 0x80 | extract32(c
, 0, 6);
2511 d
[2] = 0x80 | extract32(c
, 6, 6);
2512 d
[1] = 0x80 | extract32(c
, 12, 6);
2513 d
[0] = 0xf0 | extract32(c
, 18, 3);
2519 for (i
= 0; i
< l
; ++i
) {
2520 cpu_stb_data_ra(env
, addr
+ i
, d
[i
], ra
);
2527 static int encode_utf16(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2528 uintptr_t ra
, uint32_t c
, uint32_t *olen
)
2533 /* one word character */
2537 cpu_stw_data_ra(env
, addr
, c
, ra
);
2540 /* two word character */
2544 d1
= 0xdc00 | extract32(c
, 0, 10);
2545 d0
= 0xd800 | extract32(c
, 10, 6);
2546 d0
= deposit32(d0
, 6, 4, extract32(c
, 16, 5) - 1);
2547 cpu_stw_data_ra(env
, addr
+ 0, d0
, ra
);
2548 cpu_stw_data_ra(env
, addr
+ 2, d1
, ra
);
2555 static int encode_utf32(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2556 uintptr_t ra
, uint32_t c
, uint32_t *olen
)
2561 cpu_stl_data_ra(env
, addr
, c
, ra
);
2566 static inline uint32_t convert_unicode(CPUS390XState
*env
, uint32_t r1
,
2567 uint32_t r2
, uint32_t m3
, uintptr_t ra
,
2568 decode_unicode_fn decode
,
2569 encode_unicode_fn encode
)
2571 uint64_t dst
= get_address(env
, r1
);
2572 uint64_t dlen
= get_length(env
, r1
+ 1);
2573 uint64_t src
= get_address(env
, r2
);
2574 uint64_t slen
= get_length(env
, r2
+ 1);
2575 bool enh_check
= m3
& 1;
2578 /* Lest we fail to service interrupts in a timely manner, limit the
2579 amount of work we're willing to do. For now, let's cap at 256. */
2580 for (i
= 0; i
< 256; ++i
) {
2581 uint32_t c
, ilen
, olen
;
2583 cc
= decode(env
, src
, slen
, enh_check
, ra
, &c
, &ilen
);
2584 if (unlikely(cc
>= 0)) {
2587 cc
= encode(env
, dst
, dlen
, ra
, c
, &olen
);
2588 if (unlikely(cc
>= 0)) {
2599 set_address(env
, r1
, dst
);
2600 set_length(env
, r1
+ 1, dlen
);
2601 set_address(env
, r2
, src
);
2602 set_length(env
, r2
+ 1, slen
);
2607 uint32_t HELPER(cu12
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2609 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2610 decode_utf8
, encode_utf16
);
2613 uint32_t HELPER(cu14
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2615 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2616 decode_utf8
, encode_utf32
);
2619 uint32_t HELPER(cu21
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2621 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2622 decode_utf16
, encode_utf8
);
2625 uint32_t HELPER(cu24
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2627 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2628 decode_utf16
, encode_utf32
);
2631 uint32_t HELPER(cu41
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2633 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2634 decode_utf32
, encode_utf8
);
2637 uint32_t HELPER(cu42
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2639 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2640 decode_utf32
, encode_utf16
);