/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/address-spaces.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
30 #if !defined(CONFIG_USER_ONLY)
31 #include "hw/s390x/storage-keys.h"
34 /*****************************************************************************/
36 #if !defined(CONFIG_USER_ONLY)
38 /* try to fill the TLB and return an exception if error. If retaddr is
39 NULL, it means that the function was called in C code (i.e. not
40 from generated code or from helper.c) */
41 /* XXX: fix it to restore all registers */
42 void tlb_fill(CPUState
*cs
, target_ulong addr
, MMUAccessType access_type
,
43 int mmu_idx
, uintptr_t retaddr
)
45 int ret
= s390_cpu_handle_mmu_fault(cs
, addr
, access_type
, mmu_idx
);
46 if (unlikely(ret
!= 0)) {
47 cpu_loop_exit_restore(cs
, retaddr
);
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
60 static inline bool psw_key_valid(CPUS390XState
*env
, uint8_t psw_key
)
62 uint16_t pkm
= env
->cregs
[3] >> 16;
64 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
65 /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
66 return pkm
& (0x80 >> psw_key
);
71 /* Reduce the length so that addr + len doesn't cross a page boundary. */
72 static inline uint32_t adj_len_to_page(uint32_t len
, uint64_t addr
)
74 #ifndef CONFIG_USER_ONLY
75 if ((addr
& ~TARGET_PAGE_MASK
) + len
- 1 >= TARGET_PAGE_SIZE
) {
76 return -(addr
| TARGET_PAGE_MASK
);
82 /* Trigger a SPECIFICATION exception if an address or a length is not
84 static inline void check_alignment(CPUS390XState
*env
, uint64_t v
,
85 int wordsize
, uintptr_t ra
)
88 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
89 cpu_restore_state(cs
, ra
);
90 program_interrupt(env
, PGM_SPECIFICATION
, 6);
94 /* Load a value from memory according to its size. */
95 static inline uint64_t cpu_ldusize_data_ra(CPUS390XState
*env
, uint64_t addr
,
96 int wordsize
, uintptr_t ra
)
100 return cpu_ldub_data_ra(env
, addr
, ra
);
102 return cpu_lduw_data_ra(env
, addr
, ra
);
108 /* Store a to memory according to its size. */
109 static inline void cpu_stsize_data_ra(CPUS390XState
*env
, uint64_t addr
,
110 uint64_t value
, int wordsize
,
115 cpu_stb_data_ra(env
, addr
, value
, ra
);
118 cpu_stw_data_ra(env
, addr
, value
, ra
);
125 static void fast_memset(CPUS390XState
*env
, uint64_t dest
, uint8_t byte
,
126 uint32_t l
, uintptr_t ra
)
128 int mmu_idx
= cpu_mmu_index(env
, false);
131 void *p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, mmu_idx
);
133 /* Access to the whole page in write mode granted. */
134 uint32_t l_adj
= adj_len_to_page(l
, dest
);
135 memset(p
, byte
, l_adj
);
139 /* We failed to get access to the whole page. The next write
140 access will likely fill the QEMU TLB for the next iteration. */
141 cpu_stb_data_ra(env
, dest
, byte
, ra
);
148 #ifndef CONFIG_USER_ONLY
149 static void fast_memmove_idx(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
150 uint32_t len
, int dest_idx
, int src_idx
,
153 TCGMemOpIdx oi_dest
= make_memop_idx(MO_UB
, dest_idx
);
154 TCGMemOpIdx oi_src
= make_memop_idx(MO_UB
, src_idx
);
161 src
= wrap_address(env
, src
);
162 dest
= wrap_address(env
, dest
);
163 src_p
= tlb_vaddr_to_host(env
, src
, MMU_DATA_LOAD
, src_idx
);
164 dest_p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, dest_idx
);
166 if (src_p
&& dest_p
) {
167 /* Access to both whole pages granted. */
168 len_adj
= adj_len_to_page(adj_len_to_page(len
, src
), dest
);
169 memmove(dest_p
, src_p
, len_adj
);
171 /* We failed to get access to one or both whole pages. The next
172 read or write access will likely fill the QEMU TLB for the
175 x
= helper_ret_ldub_mmu(env
, src
, oi_src
, ra
);
176 helper_ret_stb_mmu(env
, dest
, x
, oi_dest
, ra
);
184 static int mmu_idx_from_as(uint8_t as
)
188 return MMU_PRIMARY_IDX
;
190 return MMU_SECONDARY_IDX
;
194 /* FIXME AS_ACCREG */
195 g_assert_not_reached();
199 static void fast_memmove_as(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
200 uint32_t len
, uint8_t dest_as
, uint8_t src_as
,
203 int src_idx
= mmu_idx_from_as(src_as
);
204 int dest_idx
= mmu_idx_from_as(dest_as
);
206 fast_memmove_idx(env
, dest
, src
, len
, dest_idx
, src_idx
, ra
);
210 static void fast_memmove(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
211 uint32_t l
, uintptr_t ra
)
213 int mmu_idx
= cpu_mmu_index(env
, false);
216 void *src_p
= tlb_vaddr_to_host(env
, src
, MMU_DATA_LOAD
, mmu_idx
);
217 void *dest_p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, mmu_idx
);
218 if (src_p
&& dest_p
) {
219 /* Access to both whole pages granted. */
220 uint32_t l_adj
= adj_len_to_page(l
, src
);
221 l_adj
= adj_len_to_page(l_adj
, dest
);
222 memmove(dest_p
, src_p
, l_adj
);
227 /* We failed to get access to one or both whole pages. The next
228 read or write access will likely fill the QEMU TLB for the
230 cpu_stb_data_ra(env
, dest
, cpu_ldub_data_ra(env
, src
, ra
), ra
);
239 static uint32_t do_helper_nc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
240 uint64_t src
, uintptr_t ra
)
245 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
246 __func__
, l
, dest
, src
);
248 for (i
= 0; i
<= l
; i
++) {
249 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
250 x
&= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
252 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
257 uint32_t HELPER(nc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
260 return do_helper_nc(env
, l
, dest
, src
, GETPC());
264 static uint32_t do_helper_xc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
265 uint64_t src
, uintptr_t ra
)
270 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
271 __func__
, l
, dest
, src
);
273 /* xor with itself is the same as memset(0) */
275 fast_memset(env
, dest
, 0, l
+ 1, ra
);
279 for (i
= 0; i
<= l
; i
++) {
280 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
281 x
^= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
283 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
288 uint32_t HELPER(xc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
291 return do_helper_xc(env
, l
, dest
, src
, GETPC());
295 static uint32_t do_helper_oc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
296 uint64_t src
, uintptr_t ra
)
301 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
302 __func__
, l
, dest
, src
);
304 for (i
= 0; i
<= l
; i
++) {
305 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
306 x
|= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
308 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
313 uint32_t HELPER(oc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
316 return do_helper_oc(env
, l
, dest
, src
, GETPC());
320 static uint32_t do_helper_mvc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
321 uint64_t src
, uintptr_t ra
)
325 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
326 __func__
, l
, dest
, src
);
328 /* mvc and memmove do not behave the same when areas overlap! */
329 /* mvc with source pointing to the byte after the destination is the
330 same as memset with the first source byte */
331 if (dest
== src
+ 1) {
332 fast_memset(env
, dest
, cpu_ldub_data_ra(env
, src
, ra
), l
+ 1, ra
);
333 } else if (dest
< src
|| src
+ l
< dest
) {
334 fast_memmove(env
, dest
, src
, l
+ 1, ra
);
336 /* slow version with byte accesses which always work */
337 for (i
= 0; i
<= l
; i
++) {
338 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
339 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
346 void HELPER(mvc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
348 do_helper_mvc(env
, l
, dest
, src
, GETPC());
352 void HELPER(mvcin
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
354 uintptr_t ra
= GETPC();
357 for (i
= 0; i
<= l
; i
++) {
358 uint8_t v
= cpu_ldub_data_ra(env
, src
- i
, ra
);
359 cpu_stb_data_ra(env
, dest
+ i
, v
, ra
);
364 void HELPER(mvn
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
366 uintptr_t ra
= GETPC();
369 for (i
= 0; i
<= l
; i
++) {
370 uint8_t v
= cpu_ldub_data_ra(env
, dest
+ i
, ra
) & 0xf0;
371 v
|= cpu_ldub_data_ra(env
, src
+ i
, ra
) & 0x0f;
372 cpu_stb_data_ra(env
, dest
+ i
, v
, ra
);
376 /* move with offset */
377 void HELPER(mvo
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
379 uintptr_t ra
= GETPC();
380 int len_dest
= l
>> 4;
381 int len_src
= l
& 0xf;
382 uint8_t byte_dest
, byte_src
;
388 /* Handle rightmost byte */
389 byte_src
= cpu_ldub_data_ra(env
, src
, ra
);
390 byte_dest
= cpu_ldub_data_ra(env
, dest
, ra
);
391 byte_dest
= (byte_dest
& 0x0f) | (byte_src
<< 4);
392 cpu_stb_data_ra(env
, dest
, byte_dest
, ra
);
394 /* Process remaining bytes from right to left */
395 for (i
= 1; i
<= len_dest
; i
++) {
396 byte_dest
= byte_src
>> 4;
397 if (len_src
- i
>= 0) {
398 byte_src
= cpu_ldub_data_ra(env
, src
- i
, ra
);
402 byte_dest
|= byte_src
<< 4;
403 cpu_stb_data_ra(env
, dest
- i
, byte_dest
, ra
);
408 void HELPER(mvz
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
410 uintptr_t ra
= GETPC();
413 for (i
= 0; i
<= l
; i
++) {
414 uint8_t b
= cpu_ldub_data_ra(env
, dest
+ i
, ra
) & 0x0f;
415 b
|= cpu_ldub_data_ra(env
, src
+ i
, ra
) & 0xf0;
416 cpu_stb_data_ra(env
, dest
+ i
, b
, ra
);
420 /* compare unsigned byte arrays */
421 static uint32_t do_helper_clc(CPUS390XState
*env
, uint32_t l
, uint64_t s1
,
422 uint64_t s2
, uintptr_t ra
)
427 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
428 __func__
, l
, s1
, s2
);
430 for (i
= 0; i
<= l
; i
++) {
431 uint8_t x
= cpu_ldub_data_ra(env
, s1
+ i
, ra
);
432 uint8_t y
= cpu_ldub_data_ra(env
, s2
+ i
, ra
);
433 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
447 uint32_t HELPER(clc
)(CPUS390XState
*env
, uint32_t l
, uint64_t s1
, uint64_t s2
)
449 return do_helper_clc(env
, l
, s1
, s2
, GETPC());
452 /* compare logical under mask */
453 uint32_t HELPER(clm
)(CPUS390XState
*env
, uint32_t r1
, uint32_t mask
,
456 uintptr_t ra
= GETPC();
459 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __func__
, r1
,
464 uint8_t d
= cpu_ldub_data_ra(env
, addr
, ra
);
465 uint8_t r
= extract32(r1
, 24, 8);
466 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
477 mask
= (mask
<< 1) & 0xf;
485 static inline uint64_t get_address(CPUS390XState
*env
, int reg
)
487 return wrap_address(env
, env
->regs
[reg
]);
490 static inline void set_address(CPUS390XState
*env
, int reg
, uint64_t address
)
492 if (env
->psw
.mask
& PSW_MASK_64
) {
494 env
->regs
[reg
] = address
;
496 if (!(env
->psw
.mask
& PSW_MASK_32
)) {
497 /* 24-Bit mode. According to the PoO it is implementation
498 dependent if bits 32-39 remain unchanged or are set to
499 zeros. Choose the former so that the function can also be
501 env
->regs
[reg
] = deposit64(env
->regs
[reg
], 0, 24, address
);
503 /* 31-Bit mode. According to the PoO it is implementation
504 dependent if bit 32 remains unchanged or is set to zero.
505 Choose the latter so that the function can also be used for
507 address
&= 0x7fffffff;
508 env
->regs
[reg
] = deposit64(env
->regs
[reg
], 0, 32, address
);
513 static inline uint64_t wrap_length(CPUS390XState
*env
, uint64_t length
)
515 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
516 /* 24-Bit and 31-Bit mode */
517 length
&= 0x7fffffff;
522 static inline uint64_t get_length(CPUS390XState
*env
, int reg
)
524 return wrap_length(env
, env
->regs
[reg
]);
527 static inline void set_length(CPUS390XState
*env
, int reg
, uint64_t length
)
529 if (env
->psw
.mask
& PSW_MASK_64
) {
531 env
->regs
[reg
] = length
;
533 /* 24-Bit and 31-Bit mode */
534 env
->regs
[reg
] = deposit64(env
->regs
[reg
], 0, 32, length
);
538 /* search string (c is byte to search, r2 is string, r1 end of string) */
539 void HELPER(srst
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
541 uintptr_t ra
= GETPC();
544 uint8_t v
, c
= env
->regs
[0];
546 /* Bits 32-55 must contain all 0. */
547 if (env
->regs
[0] & 0xffffff00u
) {
548 cpu_restore_state(ENV_GET_CPU(env
), ra
);
549 program_interrupt(env
, PGM_SPECIFICATION
, 6);
552 str
= get_address(env
, r2
);
553 end
= get_address(env
, r1
);
555 /* Lest we fail to service interrupts in a timely manner, limit the
556 amount of work we're willing to do. For now, let's cap at 8k. */
557 for (len
= 0; len
< 0x2000; ++len
) {
558 if (str
+ len
== end
) {
559 /* Character not found. R1 & R2 are unmodified. */
563 v
= cpu_ldub_data_ra(env
, str
+ len
, ra
);
565 /* Character found. Set R1 to the location; R2 is unmodified. */
567 set_address(env
, r1
, str
+ len
);
572 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
574 set_address(env
, r2
, str
+ len
);
577 void HELPER(srstu
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
579 uintptr_t ra
= GETPC();
581 uint16_t v
, c
= env
->regs
[0];
582 uint64_t end
, str
, adj_end
;
584 /* Bits 32-47 of R0 must be zero. */
585 if (env
->regs
[0] & 0xffff0000u
) {
586 cpu_restore_state(ENV_GET_CPU(env
), ra
);
587 program_interrupt(env
, PGM_SPECIFICATION
, 6);
590 str
= get_address(env
, r2
);
591 end
= get_address(env
, r1
);
593 /* If the LSB of the two addresses differ, use one extra byte. */
594 adj_end
= end
+ ((str
^ end
) & 1);
596 /* Lest we fail to service interrupts in a timely manner, limit the
597 amount of work we're willing to do. For now, let's cap at 8k. */
598 for (len
= 0; len
< 0x2000; len
+= 2) {
599 if (str
+ len
== adj_end
) {
600 /* End of input found. */
604 v
= cpu_lduw_data_ra(env
, str
+ len
, ra
);
606 /* Character found. Set R1 to the location; R2 is unmodified. */
608 set_address(env
, r1
, str
+ len
);
613 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
615 set_address(env
, r2
, str
+ len
);
618 /* unsigned string compare (c is string terminator) */
619 uint64_t HELPER(clst
)(CPUS390XState
*env
, uint64_t c
, uint64_t s1
, uint64_t s2
)
621 uintptr_t ra
= GETPC();
625 s1
= wrap_address(env
, s1
);
626 s2
= wrap_address(env
, s2
);
628 /* Lest we fail to service interrupts in a timely manner, limit the
629 amount of work we're willing to do. For now, let's cap at 8k. */
630 for (len
= 0; len
< 0x2000; ++len
) {
631 uint8_t v1
= cpu_ldub_data_ra(env
, s1
+ len
, ra
);
632 uint8_t v2
= cpu_ldub_data_ra(env
, s2
+ len
, ra
);
635 /* Equal. CC=0, and don't advance the registers. */
641 /* Unequal. CC={1,2}, and advance the registers. Note that
642 the terminator need not be zero, but the string that contains
643 the terminator is by definition "low". */
644 env
->cc_op
= (v1
== c
? 1 : v2
== c
? 2 : v1
< v2
? 1 : 2);
645 env
->retxl
= s2
+ len
;
650 /* CPU-determined bytes equal; advance the registers. */
652 env
->retxl
= s2
+ len
;
657 uint32_t HELPER(mvpg
)(CPUS390XState
*env
, uint64_t r0
, uint64_t r1
, uint64_t r2
)
659 /* ??? missing r0 handling, which includes access keys, but more
660 importantly optional suppression of the exception! */
661 fast_memmove(env
, r1
, r2
, TARGET_PAGE_SIZE
, GETPC());
662 return 0; /* data moved */
665 /* string copy (c is string terminator) */
666 uint64_t HELPER(mvst
)(CPUS390XState
*env
, uint64_t c
, uint64_t d
, uint64_t s
)
668 uintptr_t ra
= GETPC();
672 d
= wrap_address(env
, d
);
673 s
= wrap_address(env
, s
);
675 /* Lest we fail to service interrupts in a timely manner, limit the
676 amount of work we're willing to do. For now, let's cap at 8k. */
677 for (len
= 0; len
< 0x2000; ++len
) {
678 uint8_t v
= cpu_ldub_data_ra(env
, s
+ len
, ra
);
679 cpu_stb_data_ra(env
, d
+ len
, v
, ra
);
681 /* Complete. Set CC=1 and advance R1. */
688 /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
690 env
->retxl
= s
+ len
;
694 /* load access registers r1 to r3 from memory at a2 */
695 void HELPER(lam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
697 uintptr_t ra
= GETPC();
700 for (i
= r1
;; i
= (i
+ 1) % 16) {
701 env
->aregs
[i
] = cpu_ldl_data_ra(env
, a2
, ra
);
710 /* store access registers r1 to r3 in memory at a2 */
711 void HELPER(stam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
713 uintptr_t ra
= GETPC();
716 for (i
= r1
;; i
= (i
+ 1) % 16) {
717 cpu_stl_data_ra(env
, a2
, env
->aregs
[i
], ra
);
726 /* move long helper */
727 static inline uint32_t do_mvcl(CPUS390XState
*env
,
728 uint64_t *dest
, uint64_t *destlen
,
729 uint64_t *src
, uint64_t *srclen
,
730 uint16_t pad
, int wordsize
, uintptr_t ra
)
732 uint64_t len
= MIN(*srclen
, *destlen
);
735 if (*destlen
== *srclen
) {
737 } else if (*destlen
< *srclen
) {
743 /* Copy the src array */
744 fast_memmove(env
, *dest
, *src
, len
, ra
);
750 /* Pad the remaining area */
752 fast_memset(env
, *dest
, pad
, *destlen
, ra
);
756 /* If remaining length is odd, pad with odd byte first. */
758 cpu_stb_data_ra(env
, *dest
, pad
& 0xff, ra
);
762 /* The remaining length is even, pad using words. */
763 for (; *destlen
; *dest
+= 2, *destlen
-= 2) {
764 cpu_stw_data_ra(env
, *dest
, pad
, ra
);
772 uint32_t HELPER(mvcl
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
774 uintptr_t ra
= GETPC();
775 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
776 uint64_t dest
= get_address(env
, r1
);
777 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
778 uint64_t src
= get_address(env
, r2
);
779 uint8_t pad
= env
->regs
[r2
+ 1] >> 24;
782 cc
= do_mvcl(env
, &dest
, &destlen
, &src
, &srclen
, pad
, 1, ra
);
784 env
->regs
[r1
+ 1] = deposit64(env
->regs
[r1
+ 1], 0, 24, destlen
);
785 env
->regs
[r2
+ 1] = deposit64(env
->regs
[r2
+ 1], 0, 24, srclen
);
786 set_address(env
, r1
, dest
);
787 set_address(env
, r2
, src
);
792 /* move long extended */
793 uint32_t HELPER(mvcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
796 uintptr_t ra
= GETPC();
797 uint64_t destlen
= get_length(env
, r1
+ 1);
798 uint64_t dest
= get_address(env
, r1
);
799 uint64_t srclen
= get_length(env
, r3
+ 1);
800 uint64_t src
= get_address(env
, r3
);
804 cc
= do_mvcl(env
, &dest
, &destlen
, &src
, &srclen
, pad
, 1, ra
);
806 set_length(env
, r1
+ 1, destlen
);
807 set_length(env
, r3
+ 1, srclen
);
808 set_address(env
, r1
, dest
);
809 set_address(env
, r3
, src
);
814 /* move long unicode */
815 uint32_t HELPER(mvclu
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
818 uintptr_t ra
= GETPC();
819 uint64_t destlen
= get_length(env
, r1
+ 1);
820 uint64_t dest
= get_address(env
, r1
);
821 uint64_t srclen
= get_length(env
, r3
+ 1);
822 uint64_t src
= get_address(env
, r3
);
826 cc
= do_mvcl(env
, &dest
, &destlen
, &src
, &srclen
, pad
, 2, ra
);
828 set_length(env
, r1
+ 1, destlen
);
829 set_length(env
, r3
+ 1, srclen
);
830 set_address(env
, r1
, dest
);
831 set_address(env
, r3
, src
);
836 /* compare logical long helper */
837 static inline uint32_t do_clcl(CPUS390XState
*env
,
838 uint64_t *src1
, uint64_t *src1len
,
839 uint64_t *src3
, uint64_t *src3len
,
840 uint16_t pad
, uint64_t limit
,
841 int wordsize
, uintptr_t ra
)
843 uint64_t len
= MAX(*src1len
, *src3len
);
846 check_alignment(env
, *src1len
| *src3len
, wordsize
, ra
);
852 /* Lest we fail to service interrupts in a timely manner, limit the
853 amount of work we're willing to do. */
859 for (; len
; len
-= wordsize
) {
864 v1
= cpu_ldusize_data_ra(env
, *src1
, wordsize
, ra
);
867 v3
= cpu_ldusize_data_ra(env
, *src3
, wordsize
, ra
);
871 cc
= (v1
< v3
) ? 1 : 2;
877 *src1len
-= wordsize
;
881 *src3len
-= wordsize
;
889 /* compare logical long */
890 uint32_t HELPER(clcl
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
892 uintptr_t ra
= GETPC();
893 uint64_t src1len
= extract64(env
->regs
[r1
+ 1], 0, 24);
894 uint64_t src1
= get_address(env
, r1
);
895 uint64_t src3len
= extract64(env
->regs
[r2
+ 1], 0, 24);
896 uint64_t src3
= get_address(env
, r2
);
897 uint8_t pad
= env
->regs
[r2
+ 1] >> 24;
900 cc
= do_clcl(env
, &src1
, &src1len
, &src3
, &src3len
, pad
, -1, 1, ra
);
902 env
->regs
[r1
+ 1] = deposit64(env
->regs
[r1
+ 1], 0, 24, src1len
);
903 env
->regs
[r2
+ 1] = deposit64(env
->regs
[r2
+ 1], 0, 24, src3len
);
904 set_address(env
, r1
, src1
);
905 set_address(env
, r2
, src3
);
910 /* compare logical long extended memcompare insn with padding */
911 uint32_t HELPER(clcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
914 uintptr_t ra
= GETPC();
915 uint64_t src1len
= get_length(env
, r1
+ 1);
916 uint64_t src1
= get_address(env
, r1
);
917 uint64_t src3len
= get_length(env
, r3
+ 1);
918 uint64_t src3
= get_address(env
, r3
);
922 cc
= do_clcl(env
, &src1
, &src1len
, &src3
, &src3len
, pad
, 0x2000, 1, ra
);
924 set_length(env
, r1
+ 1, src1len
);
925 set_length(env
, r3
+ 1, src3len
);
926 set_address(env
, r1
, src1
);
927 set_address(env
, r3
, src3
);
932 /* compare logical long unicode memcompare insn with padding */
933 uint32_t HELPER(clclu
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
936 uintptr_t ra
= GETPC();
937 uint64_t src1len
= get_length(env
, r1
+ 1);
938 uint64_t src1
= get_address(env
, r1
);
939 uint64_t src3len
= get_length(env
, r3
+ 1);
940 uint64_t src3
= get_address(env
, r3
);
944 cc
= do_clcl(env
, &src1
, &src1len
, &src3
, &src3len
, pad
, 0x1000, 2, ra
);
946 set_length(env
, r1
+ 1, src1len
);
947 set_length(env
, r3
+ 1, src3len
);
948 set_address(env
, r1
, src1
);
949 set_address(env
, r3
, src3
);
955 uint64_t HELPER(cksm
)(CPUS390XState
*env
, uint64_t r1
,
956 uint64_t src
, uint64_t src_len
)
958 uintptr_t ra
= GETPC();
959 uint64_t max_len
, len
;
960 uint64_t cksm
= (uint32_t)r1
;
962 /* Lest we fail to service interrupts in a timely manner, limit the
963 amount of work we're willing to do. For now, let's cap at 8k. */
964 max_len
= (src_len
> 0x2000 ? 0x2000 : src_len
);
966 /* Process full words as available. */
967 for (len
= 0; len
+ 4 <= max_len
; len
+= 4, src
+= 4) {
968 cksm
+= (uint32_t)cpu_ldl_data_ra(env
, src
, ra
);
971 switch (max_len
- len
) {
973 cksm
+= cpu_ldub_data_ra(env
, src
, ra
) << 24;
977 cksm
+= cpu_lduw_data_ra(env
, src
, ra
) << 16;
981 cksm
+= cpu_lduw_data_ra(env
, src
, ra
) << 16;
982 cksm
+= cpu_ldub_data_ra(env
, src
+ 2, ra
) << 8;
987 /* Fold the carry from the checksum. Note that we can see carry-out
988 during folding more than once (but probably not more than twice). */
989 while (cksm
> 0xffffffffull
) {
990 cksm
= (uint32_t)cksm
+ (cksm
>> 32);
993 /* Indicate whether or not we've processed everything. */
994 env
->cc_op
= (len
== src_len
? 0 : 3);
996 /* Return both cksm and processed length. */
1001 void HELPER(pack
)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
, uint64_t src
)
1003 uintptr_t ra
= GETPC();
1004 int len_dest
= len
>> 4;
1005 int len_src
= len
& 0xf;
1011 /* last byte is special, it only flips the nibbles */
1012 b
= cpu_ldub_data_ra(env
, src
, ra
);
1013 cpu_stb_data_ra(env
, dest
, (b
<< 4) | (b
>> 4), ra
);
1017 /* now pack every value */
1018 while (len_dest
>= 0) {
1022 b
= cpu_ldub_data_ra(env
, src
, ra
) & 0x0f;
1027 b
|= cpu_ldub_data_ra(env
, src
, ra
) << 4;
1034 cpu_stb_data_ra(env
, dest
, b
, ra
);
1038 static inline void do_pkau(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
1039 uint32_t srclen
, int ssize
, uintptr_t ra
)
1042 /* The destination operand is always 16 bytes long. */
1043 const int destlen
= 16;
1045 /* The operands are processed from right to left. */
1047 dest
+= destlen
- 1;
1049 for (i
= 0; i
< destlen
; i
++) {
1052 /* Start with a positive sign */
1055 } else if (srclen
> ssize
) {
1056 b
= cpu_ldub_data_ra(env
, src
, ra
) & 0x0f;
1061 if (srclen
> ssize
) {
1062 b
|= cpu_ldub_data_ra(env
, src
, ra
) << 4;
1067 cpu_stb_data_ra(env
, dest
, b
, ra
);
1073 void HELPER(pka
)(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
1076 do_pkau(env
, dest
, src
, srclen
, 1, GETPC());
1079 void HELPER(pku
)(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
1082 do_pkau(env
, dest
, src
, srclen
, 2, GETPC());
1085 void HELPER(unpk
)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
,
1088 uintptr_t ra
= GETPC();
1089 int len_dest
= len
>> 4;
1090 int len_src
= len
& 0xf;
1092 int second_nibble
= 0;
1097 /* last byte is special, it only flips the nibbles */
1098 b
= cpu_ldub_data_ra(env
, src
, ra
);
1099 cpu_stb_data_ra(env
, dest
, (b
<< 4) | (b
>> 4), ra
);
1103 /* now pad every nibble with 0xf0 */
1105 while (len_dest
> 0) {
1106 uint8_t cur_byte
= 0;
1109 cur_byte
= cpu_ldub_data_ra(env
, src
, ra
);
1115 /* only advance one nibble at a time */
1116 if (second_nibble
) {
1121 second_nibble
= !second_nibble
;
1124 cur_byte
= (cur_byte
& 0xf);
1128 cpu_stb_data_ra(env
, dest
, cur_byte
, ra
);
1132 static inline uint32_t do_unpkau(CPUS390XState
*env
, uint64_t dest
,
1133 uint32_t destlen
, int dsize
, uint64_t src
,
1139 /* The source operand is always 16 bytes long. */
1140 const int srclen
= 16;
1142 /* The operands are processed from right to left. */
1144 dest
+= destlen
- dsize
;
1146 /* Check for the sign. */
1147 b
= cpu_ldub_data_ra(env
, src
, ra
);
1161 cc
= 3; /* invalid */
1165 /* Now pad every nibble with 0x30, advancing one nibble at a time. */
1166 for (i
= 0; i
< destlen
; i
+= dsize
) {
1167 if (i
== (31 * dsize
)) {
1168 /* If length is 32/64 bytes, the leftmost byte is 0. */
1170 } else if (i
% (2 * dsize
)) {
1171 b
= cpu_ldub_data_ra(env
, src
, ra
);
1176 cpu_stsize_data_ra(env
, dest
, 0x30 + (b
& 0xf), dsize
, ra
);
1183 uint32_t HELPER(unpka
)(CPUS390XState
*env
, uint64_t dest
, uint32_t destlen
,
1186 return do_unpkau(env
, dest
, destlen
, 1, src
, GETPC());
1189 uint32_t HELPER(unpku
)(CPUS390XState
*env
, uint64_t dest
, uint32_t destlen
,
1192 return do_unpkau(env
, dest
, destlen
, 2, src
, GETPC());
1195 uint32_t HELPER(tp
)(CPUS390XState
*env
, uint64_t dest
, uint32_t destlen
)
1197 uintptr_t ra
= GETPC();
1201 for (i
= 0; i
< destlen
; i
++) {
1202 uint8_t b
= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
1204 cc
|= (b
& 0xf0) > 0x90 ? 2 : 0;
1206 if (i
== (destlen
- 1)) {
1208 cc
|= (b
& 0xf) < 0xa ? 1 : 0;
1211 cc
|= (b
& 0xf) > 0x9 ? 2 : 0;
1218 static uint32_t do_helper_tr(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1219 uint64_t trans
, uintptr_t ra
)
1223 for (i
= 0; i
<= len
; i
++) {
1224 uint8_t byte
= cpu_ldub_data_ra(env
, array
+ i
, ra
);
1225 uint8_t new_byte
= cpu_ldub_data_ra(env
, trans
+ byte
, ra
);
1226 cpu_stb_data_ra(env
, array
+ i
, new_byte
, ra
);
1232 void HELPER(tr
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1235 do_helper_tr(env
, len
, array
, trans
, GETPC());
1238 uint64_t HELPER(tre
)(CPUS390XState
*env
, uint64_t array
,
1239 uint64_t len
, uint64_t trans
)
1241 uintptr_t ra
= GETPC();
1242 uint8_t end
= env
->regs
[0] & 0xff;
1247 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
1248 array
&= 0x7fffffff;
1252 /* Lest we fail to service interrupts in a timely manner, limit the
1253 amount of work we're willing to do. For now, let's cap at 8k. */
1259 for (i
= 0; i
< l
; i
++) {
1260 uint8_t byte
, new_byte
;
1262 byte
= cpu_ldub_data_ra(env
, array
+ i
, ra
);
1269 new_byte
= cpu_ldub_data_ra(env
, trans
+ byte
, ra
);
1270 cpu_stb_data_ra(env
, array
+ i
, new_byte
, ra
);
1274 env
->retxl
= len
- i
;
1278 static inline uint32_t do_helper_trt(CPUS390XState
*env
, int len
,
1279 uint64_t array
, uint64_t trans
,
1280 int inc
, uintptr_t ra
)
1284 for (i
= 0; i
<= len
; i
++) {
1285 uint8_t byte
= cpu_ldub_data_ra(env
, array
+ i
* inc
, ra
);
1286 uint8_t sbyte
= cpu_ldub_data_ra(env
, trans
+ byte
, ra
);
1289 set_address(env
, 1, array
+ i
* inc
);
1290 env
->regs
[2] = deposit64(env
->regs
[2], 0, 8, sbyte
);
1291 return (i
== len
) ? 2 : 1;
1298 uint32_t HELPER(trt
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1301 return do_helper_trt(env
, len
, array
, trans
, 1, GETPC());
1304 uint32_t HELPER(trtr
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1307 return do_helper_trt(env
, len
, array
, trans
, -1, GETPC());
1310 /* Translate one/two to one/two */
1311 uint32_t HELPER(trXX
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
,
1312 uint32_t tst
, uint32_t sizes
)
1314 uintptr_t ra
= GETPC();
1315 int dsize
= (sizes
& 1) ? 1 : 2;
1316 int ssize
= (sizes
& 2) ? 1 : 2;
1317 uint64_t tbl
= get_address(env
, 1);
1318 uint64_t dst
= get_address(env
, r1
);
1319 uint64_t len
= get_length(env
, r1
+ 1);
1320 uint64_t src
= get_address(env
, r2
);
1324 /* The lower address bits of TBL are ignored. For TROO, TROT, it's
1325 the low 3 bits (double-word aligned). For TRTO, TRTT, it's either
1326 the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH). */
1327 if (ssize
== 2 && !s390_has_feat(S390_FEAT_ETF2_ENH
)) {
1333 check_alignment(env
, len
, ssize
, ra
);
1335 /* Lest we fail to service interrupts in a timely manner, */
1336 /* limit the amount of work we're willing to do. */
1337 for (i
= 0; i
< 0x2000; i
++) {
1338 uint16_t sval
= cpu_ldusize_data_ra(env
, src
, ssize
, ra
);
1339 uint64_t tble
= tbl
+ (sval
* dsize
);
1340 uint16_t dval
= cpu_ldusize_data_ra(env
, tble
, dsize
, ra
);
1345 cpu_stsize_data_ra(env
, dst
, dval
, dsize
, ra
);
1357 set_address(env
, r1
, dst
);
1358 set_length(env
, r1
+ 1, len
);
1359 set_address(env
, r2
, src
);
1364 void HELPER(cdsg
)(CPUS390XState
*env
, uint64_t addr
,
1365 uint32_t r1
, uint32_t r3
)
1367 uintptr_t ra
= GETPC();
1368 Int128 cmpv
= int128_make128(env
->regs
[r1
+ 1], env
->regs
[r1
]);
1369 Int128 newv
= int128_make128(env
->regs
[r3
+ 1], env
->regs
[r3
]);
1373 if (parallel_cpus
) {
1374 #ifndef CONFIG_ATOMIC128
1375 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
1377 int mem_idx
= cpu_mmu_index(env
, false);
1378 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
1379 oldv
= helper_atomic_cmpxchgo_be_mmu(env
, addr
, cmpv
, newv
, oi
, ra
);
1380 fail
= !int128_eq(oldv
, cmpv
);
1383 uint64_t oldh
, oldl
;
1385 check_alignment(env
, addr
, 16, ra
);
1387 oldh
= cpu_ldq_data_ra(env
, addr
+ 0, ra
);
1388 oldl
= cpu_ldq_data_ra(env
, addr
+ 8, ra
);
1390 oldv
= int128_make128(oldl
, oldh
);
1391 fail
= !int128_eq(oldv
, cmpv
);
1396 cpu_stq_data_ra(env
, addr
+ 0, int128_gethi(newv
), ra
);
1397 cpu_stq_data_ra(env
, addr
+ 8, int128_getlo(newv
), ra
);
1401 env
->regs
[r1
] = int128_gethi(oldv
);
1402 env
->regs
[r1
+ 1] = int128_getlo(oldv
);
1405 uint32_t HELPER(csst
)(CPUS390XState
*env
, uint32_t r3
, uint64_t a1
, uint64_t a2
)
1407 #if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128)
1408 uint32_t mem_idx
= cpu_mmu_index(env
, false);
1410 uintptr_t ra
= GETPC();
1411 uint32_t fc
= extract32(env
->regs
[0], 0, 8);
1412 uint32_t sc
= extract32(env
->regs
[0], 8, 8);
1413 uint64_t pl
= get_address(env
, 1) & -16;
1417 /* Sanity check the function code and storage characteristic. */
1418 if (fc
> 1 || sc
> 3) {
1419 if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2
)) {
1420 goto spec_exception
;
1422 if (fc
> 2 || sc
> 4 || (fc
== 2 && (r3
& 1))) {
1423 goto spec_exception
;
1427 /* Sanity check the alignments. */
1428 if (extract32(a1
, 0, 4 << fc
) || extract32(a2
, 0, 1 << sc
)) {
1429 goto spec_exception
;
1432 /* Sanity check writability of the store address. */
1433 #ifndef CONFIG_USER_ONLY
1434 probe_write(env
, a2
, mem_idx
, ra
);
1437 /* Note that the compare-and-swap is atomic, and the store is atomic, but
1438 the complete operation is not. Therefore we do not need to assert serial
1439 context in order to implement this. That said, restart early if we can't
1440 support either operation that is supposed to be atomic. */
1441 if (parallel_cpus
) {
1443 #if !defined(CONFIG_ATOMIC64)
1445 #elif !defined(CONFIG_ATOMIC128)
1448 if (((4 << fc
) | (1 << sc
)) & mask
) {
1449 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
1453 /* All loads happen before all stores. For simplicity, load the entire
1454 store value area from the parameter list. */
1455 svh
= cpu_ldq_data_ra(env
, pl
+ 16, ra
);
1456 svl
= cpu_ldq_data_ra(env
, pl
+ 24, ra
);
1461 uint32_t nv
= cpu_ldl_data_ra(env
, pl
, ra
);
1462 uint32_t cv
= env
->regs
[r3
];
1465 if (parallel_cpus
) {
1466 #ifdef CONFIG_USER_ONLY
1467 uint32_t *haddr
= g2h(a1
);
1468 ov
= atomic_cmpxchg__nocheck(haddr
, cv
, nv
);
1470 TCGMemOpIdx oi
= make_memop_idx(MO_TEUL
| MO_ALIGN
, mem_idx
);
1471 ov
= helper_atomic_cmpxchgl_be_mmu(env
, a1
, cv
, nv
, oi
, ra
);
1474 ov
= cpu_ldl_data_ra(env
, a1
, ra
);
1475 cpu_stl_data_ra(env
, a1
, (ov
== cv
? nv
: ov
), ra
);
1478 env
->regs
[r3
] = deposit64(env
->regs
[r3
], 32, 32, ov
);
1484 uint64_t nv
= cpu_ldq_data_ra(env
, pl
, ra
);
1485 uint64_t cv
= env
->regs
[r3
];
1488 if (parallel_cpus
) {
1489 #ifdef CONFIG_ATOMIC64
1490 # ifdef CONFIG_USER_ONLY
1491 uint64_t *haddr
= g2h(a1
);
1492 ov
= atomic_cmpxchg__nocheck(haddr
, cv
, nv
);
1494 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN
, mem_idx
);
1495 ov
= helper_atomic_cmpxchgq_be_mmu(env
, a1
, cv
, nv
, oi
, ra
);
1498 /* Note that we asserted !parallel_cpus above. */
1499 g_assert_not_reached();
1502 ov
= cpu_ldq_data_ra(env
, a1
, ra
);
1503 cpu_stq_data_ra(env
, a1
, (ov
== cv
? nv
: ov
), ra
);
1512 uint64_t nvh
= cpu_ldq_data_ra(env
, pl
, ra
);
1513 uint64_t nvl
= cpu_ldq_data_ra(env
, pl
+ 8, ra
);
1514 Int128 nv
= int128_make128(nvl
, nvh
);
1515 Int128 cv
= int128_make128(env
->regs
[r3
+ 1], env
->regs
[r3
]);
1518 if (parallel_cpus
) {
1519 #ifdef CONFIG_ATOMIC128
1520 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
1521 ov
= helper_atomic_cmpxchgo_be_mmu(env
, a1
, cv
, nv
, oi
, ra
);
1522 cc
= !int128_eq(ov
, cv
);
1524 /* Note that we asserted !parallel_cpus above. */
1525 g_assert_not_reached();
1528 uint64_t oh
= cpu_ldq_data_ra(env
, a1
+ 0, ra
);
1529 uint64_t ol
= cpu_ldq_data_ra(env
, a1
+ 8, ra
);
1531 ov
= int128_make128(ol
, oh
);
1532 cc
= !int128_eq(ov
, cv
);
1537 cpu_stq_data_ra(env
, a1
+ 0, int128_gethi(nv
), ra
);
1538 cpu_stq_data_ra(env
, a1
+ 8, int128_getlo(nv
), ra
);
1541 env
->regs
[r3
+ 0] = int128_gethi(ov
);
1542 env
->regs
[r3
+ 1] = int128_getlo(ov
);
1547 g_assert_not_reached();
1550 /* Store only if the comparison succeeded. Note that above we use a pair
1551 of 64-bit big-endian loads, so for sc < 3 we must extract the value
1552 from the most-significant bits of svh. */
1556 cpu_stb_data_ra(env
, a2
, svh
>> 56, ra
);
1559 cpu_stw_data_ra(env
, a2
, svh
>> 48, ra
);
1562 cpu_stl_data_ra(env
, a2
, svh
>> 32, ra
);
1565 cpu_stq_data_ra(env
, a2
, svh
, ra
);
1568 if (parallel_cpus
) {
1569 #ifdef CONFIG_ATOMIC128
1570 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
1571 Int128 sv
= int128_make128(svl
, svh
);
1572 helper_atomic_sto_be_mmu(env
, a2
, sv
, oi
, ra
);
1574 /* Note that we asserted !parallel_cpus above. */
1575 g_assert_not_reached();
1578 cpu_stq_data_ra(env
, a2
+ 0, svh
, ra
);
1579 cpu_stq_data_ra(env
, a2
+ 8, svl
, ra
);
1583 g_assert_not_reached();
1590 cpu_restore_state(ENV_GET_CPU(env
), ra
);
1591 program_interrupt(env
, PGM_SPECIFICATION
, 6);
1592 g_assert_not_reached();
1595 #if !defined(CONFIG_USER_ONLY)
1596 void HELPER(lctlg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1598 uintptr_t ra
= GETPC();
1599 S390CPU
*cpu
= s390_env_get_cpu(env
);
1600 bool PERchanged
= false;
1604 for (i
= r1
;; i
= (i
+ 1) % 16) {
1605 uint64_t val
= cpu_ldq_data_ra(env
, src
, ra
);
1606 if (env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
1609 env
->cregs
[i
] = val
;
1610 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
1612 src
+= sizeof(uint64_t);
1619 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
1620 s390_cpu_recompute_watchpoints(CPU(cpu
));
1623 tlb_flush(CPU(cpu
));
1626 void HELPER(lctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1628 uintptr_t ra
= GETPC();
1629 S390CPU
*cpu
= s390_env_get_cpu(env
);
1630 bool PERchanged
= false;
1634 for (i
= r1
;; i
= (i
+ 1) % 16) {
1635 uint32_t val
= cpu_ldl_data_ra(env
, src
, ra
);
1636 if ((uint32_t)env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
1639 env
->cregs
[i
] = deposit64(env
->cregs
[i
], 0, 32, val
);
1640 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%x\n", i
, src
, val
);
1641 src
+= sizeof(uint32_t);
1648 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
1649 s390_cpu_recompute_watchpoints(CPU(cpu
));
1652 tlb_flush(CPU(cpu
));
1655 void HELPER(stctg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1657 uintptr_t ra
= GETPC();
1661 for (i
= r1
;; i
= (i
+ 1) % 16) {
1662 cpu_stq_data_ra(env
, dest
, env
->cregs
[i
], ra
);
1663 dest
+= sizeof(uint64_t);
1671 void HELPER(stctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1673 uintptr_t ra
= GETPC();
1677 for (i
= r1
;; i
= (i
+ 1) % 16) {
1678 cpu_stl_data_ra(env
, dest
, env
->cregs
[i
], ra
);
1679 dest
+= sizeof(uint32_t);
1687 uint32_t HELPER(testblock
)(CPUS390XState
*env
, uint64_t real_addr
)
1689 uintptr_t ra
= GETPC();
1690 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1693 real_addr
= wrap_address(env
, real_addr
) & TARGET_PAGE_MASK
;
1695 /* Check low-address protection */
1696 if ((env
->cregs
[0] & CR0_LOWPROT
) && real_addr
< 0x2000) {
1697 cpu_restore_state(cs
, ra
);
1698 program_interrupt(env
, PGM_PROTECTION
, 4);
1702 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
+= 8) {
1703 cpu_stq_real_ra(env
, real_addr
+ i
, 0, ra
);
1709 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
1715 /* insert storage key extended */
1716 uint64_t HELPER(iske
)(CPUS390XState
*env
, uint64_t r2
)
1718 static S390SKeysState
*ss
;
1719 static S390SKeysClass
*skeyclass
;
1720 uint64_t addr
= wrap_address(env
, r2
);
1723 if (addr
> ram_size
) {
1727 if (unlikely(!ss
)) {
1728 ss
= s390_get_skeys_device();
1729 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1732 if (skeyclass
->get_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1738 /* set storage key extended */
1739 void HELPER(sske
)(CPUS390XState
*env
, uint64_t r1
, uint64_t r2
)
1741 static S390SKeysState
*ss
;
1742 static S390SKeysClass
*skeyclass
;
1743 uint64_t addr
= wrap_address(env
, r2
);
1746 if (addr
> ram_size
) {
1750 if (unlikely(!ss
)) {
1751 ss
= s390_get_skeys_device();
1752 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1756 skeyclass
->set_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
);
1759 /* reset reference bit extended */
1760 uint32_t HELPER(rrbe
)(CPUS390XState
*env
, uint64_t r2
)
1762 static S390SKeysState
*ss
;
1763 static S390SKeysClass
*skeyclass
;
1766 if (r2
> ram_size
) {
1770 if (unlikely(!ss
)) {
1771 ss
= s390_get_skeys_device();
1772 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1775 if (skeyclass
->get_skeys(ss
, r2
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1779 re
= key
& (SK_R
| SK_C
);
1782 if (skeyclass
->set_skeys(ss
, r2
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1789 * 0 Reference bit zero; change bit zero
1790 * 1 Reference bit zero; change bit one
1791 * 2 Reference bit one; change bit zero
1792 * 3 Reference bit one; change bit one
1798 uint32_t HELPER(mvcs
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1800 uintptr_t ra
= GETPC();
1803 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1804 __func__
, l
, a1
, a2
);
1812 /* XXX replace w/ memcpy */
1813 for (i
= 0; i
< l
; i
++) {
1814 uint8_t x
= cpu_ldub_primary_ra(env
, a2
+ i
, ra
);
1815 cpu_stb_secondary_ra(env
, a1
+ i
, x
, ra
);
1821 uint32_t HELPER(mvcp
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1823 uintptr_t ra
= GETPC();
1826 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1827 __func__
, l
, a1
, a2
);
1835 /* XXX replace w/ memcpy */
1836 for (i
= 0; i
< l
; i
++) {
1837 uint8_t x
= cpu_ldub_secondary_ra(env
, a2
+ i
, ra
);
1838 cpu_stb_primary_ra(env
, a1
+ i
, x
, ra
);
1844 void HELPER(idte
)(CPUS390XState
*env
, uint64_t r1
, uint64_t r2
, uint32_t m4
)
1846 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1847 const uintptr_t ra
= GETPC();
1848 uint64_t table
, entry
, raddr
;
1849 uint16_t entries
, i
, index
= 0;
1852 cpu_restore_state(cs
, ra
);
1853 program_interrupt(env
, PGM_SPECIFICATION
, 4);
1856 if (!(r2
& 0x800)) {
1857 /* invalidation-and-clearing operation */
1858 table
= r1
& _ASCE_ORIGIN
;
1859 entries
= (r2
& 0x7ff) + 1;
1861 switch (r1
& _ASCE_TYPE_MASK
) {
1862 case _ASCE_TYPE_REGION1
:
1863 index
= (r2
>> 53) & 0x7ff;
1865 case _ASCE_TYPE_REGION2
:
1866 index
= (r2
>> 42) & 0x7ff;
1868 case _ASCE_TYPE_REGION3
:
1869 index
= (r2
>> 31) & 0x7ff;
1871 case _ASCE_TYPE_SEGMENT
:
1872 index
= (r2
>> 20) & 0x7ff;
1875 for (i
= 0; i
< entries
; i
++) {
1876 /* addresses are not wrapped in 24/31bit mode but table index is */
1877 raddr
= table
+ ((index
+ i
) & 0x7ff) * sizeof(entry
);
1878 entry
= ldq_phys(cs
->as
, raddr
);
1879 if (!(entry
& _REGION_ENTRY_INV
)) {
1880 /* we are allowed to not store if already invalid */
1881 entry
|= _REGION_ENTRY_INV
;
1882 stq_phys(cs
->as
, raddr
, entry
);
1887 /* We simply flush the complete tlb, therefore we can ignore r3. */
1891 tlb_flush_all_cpus_synced(cs
);
1895 /* invalidate pte */
1896 void HELPER(ipte
)(CPUS390XState
*env
, uint64_t pto
, uint64_t vaddr
,
1899 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1900 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
1901 uint64_t pte_addr
, pte
;
1903 /* Compute the page table entry address */
1904 pte_addr
= (pto
& _SEGMENT_ENTRY_ORIGIN
);
1905 pte_addr
+= (vaddr
& VADDR_PX
) >> 9;
1907 /* Mark the page table entry as invalid */
1908 pte
= ldq_phys(cs
->as
, pte_addr
);
1909 pte
|= _PAGE_INVALID
;
1910 stq_phys(cs
->as
, pte_addr
, pte
);
1912 /* XXX we exploit the fact that Linux passes the exact virtual
1913 address here - it's not obliged to! */
1915 if (vaddr
& ~VADDR_PX
) {
1916 tlb_flush_page(cs
, page
);
1917 /* XXX 31-bit hack */
1918 tlb_flush_page(cs
, page
^ 0x80000000);
1920 /* looks like we don't have a valid virtual address */
1924 if (vaddr
& ~VADDR_PX
) {
1925 tlb_flush_page_all_cpus_synced(cs
, page
);
1926 /* XXX 31-bit hack */
1927 tlb_flush_page_all_cpus_synced(cs
, page
^ 0x80000000);
1929 /* looks like we don't have a valid virtual address */
1930 tlb_flush_all_cpus_synced(cs
);
1935 /* flush local tlb */
1936 void HELPER(ptlb
)(CPUS390XState
*env
)
1938 S390CPU
*cpu
= s390_env_get_cpu(env
);
1940 tlb_flush(CPU(cpu
));
1943 /* flush global tlb */
1944 void HELPER(purge
)(CPUS390XState
*env
)
1946 S390CPU
*cpu
= s390_env_get_cpu(env
);
1948 tlb_flush_all_cpus_synced(CPU(cpu
));
1951 /* load using real address */
1952 uint64_t HELPER(lura
)(CPUS390XState
*env
, uint64_t addr
)
1954 return cpu_ldl_real_ra(env
, wrap_address(env
, addr
), GETPC());
1957 uint64_t HELPER(lurag
)(CPUS390XState
*env
, uint64_t addr
)
1959 return cpu_ldq_real_ra(env
, wrap_address(env
, addr
), GETPC());
1962 /* store using real address */
1963 void HELPER(stura
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
1965 cpu_stl_real_ra(env
, wrap_address(env
, addr
), (uint32_t)v1
, GETPC());
1967 if ((env
->psw
.mask
& PSW_MASK_PER
) &&
1968 (env
->cregs
[9] & PER_CR9_EVENT_STORE
) &&
1969 (env
->cregs
[9] & PER_CR9_EVENT_STORE_REAL
)) {
1970 /* PSW is saved just before calling the helper. */
1971 env
->per_address
= env
->psw
.addr
;
1972 env
->per_perc_atmid
= PER_CODE_EVENT_STORE_REAL
| get_per_atmid(env
);
1976 void HELPER(sturg
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
1978 cpu_stq_real_ra(env
, wrap_address(env
, addr
), v1
, GETPC());
1980 if ((env
->psw
.mask
& PSW_MASK_PER
) &&
1981 (env
->cregs
[9] & PER_CR9_EVENT_STORE
) &&
1982 (env
->cregs
[9] & PER_CR9_EVENT_STORE_REAL
)) {
1983 /* PSW is saved just before calling the helper. */
1984 env
->per_address
= env
->psw
.addr
;
1985 env
->per_perc_atmid
= PER_CODE_EVENT_STORE_REAL
| get_per_atmid(env
);
1989 /* load real address */
1990 uint64_t HELPER(lra
)(CPUS390XState
*env
, uint64_t addr
)
1992 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1994 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
1998 /* XXX incomplete - has more corner cases */
1999 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
2000 cpu_restore_state(cs
, GETPC());
2001 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
2004 old_exc
= cs
->exception_index
;
2005 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
, true)) {
2008 if (cs
->exception_index
== EXCP_PGM
) {
2009 ret
= env
->int_pgm_code
| 0x80000000;
2011 ret
|= addr
& ~TARGET_PAGE_MASK
;
2013 cs
->exception_index
= old_exc
;
2020 /* load pair from quadword */
2021 uint64_t HELPER(lpq
)(CPUS390XState
*env
, uint64_t addr
)
2023 uintptr_t ra
= GETPC();
2026 if (parallel_cpus
) {
2027 #ifndef CONFIG_ATOMIC128
2028 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
2030 int mem_idx
= cpu_mmu_index(env
, false);
2031 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
2032 Int128 v
= helper_atomic_ldo_be_mmu(env
, addr
, oi
, ra
);
2033 hi
= int128_gethi(v
);
2034 lo
= int128_getlo(v
);
2037 check_alignment(env
, addr
, 16, ra
);
2039 hi
= cpu_ldq_data_ra(env
, addr
+ 0, ra
);
2040 lo
= cpu_ldq_data_ra(env
, addr
+ 8, ra
);
2047 /* store pair to quadword */
2048 void HELPER(stpq
)(CPUS390XState
*env
, uint64_t addr
,
2049 uint64_t low
, uint64_t high
)
2051 uintptr_t ra
= GETPC();
2053 if (parallel_cpus
) {
2054 #ifndef CONFIG_ATOMIC128
2055 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
2057 int mem_idx
= cpu_mmu_index(env
, false);
2058 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
2060 Int128 v
= int128_make128(low
, high
);
2061 helper_atomic_sto_be_mmu(env
, addr
, v
, oi
, ra
);
2064 check_alignment(env
, addr
, 16, ra
);
2066 cpu_stq_data_ra(env
, addr
+ 0, high
, ra
);
2067 cpu_stq_data_ra(env
, addr
+ 8, low
, ra
);
2071 /* Execute instruction. This instruction executes an insn modified with
2072 the contents of r1. It does not change the executed instruction in memory;
2073 it does not change the program counter.
2075 Perform this by recording the modified instruction in env->ex_value.
2076 This will be noticed by cpu_get_tb_cpu_state and thus tb translation.
2078 void HELPER(ex
)(CPUS390XState
*env
, uint32_t ilen
, uint64_t r1
, uint64_t addr
)
2080 uint64_t insn
= cpu_lduw_code(env
, addr
);
2081 uint8_t opc
= insn
>> 8;
2083 /* Or in the contents of R1[56:63]. */
2086 /* Load the rest of the instruction. */
2088 switch (get_ilen(opc
)) {
2092 insn
|= (uint64_t)cpu_lduw_code(env
, addr
+ 2) << 32;
2095 insn
|= (uint64_t)(uint32_t)cpu_ldl_code(env
, addr
+ 2) << 16;
2098 g_assert_not_reached();
2101 /* The very most common cases can be sped up by avoiding a new TB. */
2102 if ((opc
& 0xf0) == 0xd0) {
2103 typedef uint32_t (*dx_helper
)(CPUS390XState
*, uint32_t, uint64_t,
2104 uint64_t, uintptr_t);
2105 static const dx_helper dx
[16] = {
2106 [0x2] = do_helper_mvc
,
2107 [0x4] = do_helper_nc
,
2108 [0x5] = do_helper_clc
,
2109 [0x6] = do_helper_oc
,
2110 [0x7] = do_helper_xc
,
2111 [0xc] = do_helper_tr
,
2113 dx_helper helper
= dx
[opc
& 0xf];
2116 uint32_t l
= extract64(insn
, 48, 8);
2117 uint32_t b1
= extract64(insn
, 44, 4);
2118 uint32_t d1
= extract64(insn
, 32, 12);
2119 uint32_t b2
= extract64(insn
, 28, 4);
2120 uint32_t d2
= extract64(insn
, 16, 12);
2121 uint64_t a1
= wrap_address(env
, env
->regs
[b1
] + d1
);
2122 uint64_t a2
= wrap_address(env
, env
->regs
[b2
] + d2
);
2124 env
->cc_op
= helper(env
, l
, a1
, a2
, 0);
2125 env
->psw
.addr
+= ilen
;
2128 } else if (opc
== 0x0a) {
2129 env
->int_svc_code
= extract64(insn
, 48, 8);
2130 env
->int_svc_ilen
= ilen
;
2131 helper_exception(env
, EXCP_SVC
);
2132 g_assert_not_reached();
2135 /* Record the insn we want to execute as well as the ilen to use
2136 during the execution of the target insn. This will also ensure
2137 that ex_value is non-zero, which flags that we are in a state
2138 that requires such execution. */
2139 env
->ex_value
= insn
| ilen
;
2142 uint32_t HELPER(mvcos
)(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
2145 const uint8_t psw_key
= (env
->psw
.mask
& PSW_MASK_KEY
) >> PSW_SHIFT_KEY
;
2146 const uint8_t psw_as
= (env
->psw
.mask
& PSW_MASK_ASC
) >> PSW_SHIFT_ASC
;
2147 const uint64_t r0
= env
->regs
[0];
2148 const uintptr_t ra
= GETPC();
2149 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
2150 uint8_t dest_key
, dest_as
, dest_k
, dest_a
;
2151 uint8_t src_key
, src_as
, src_k
, src_a
;
2155 HELPER_LOG("%s dest %" PRIx64
", src %" PRIx64
", len %" PRIx64
"\n",
2156 __func__
, dest
, src
, len
);
2158 if (!(env
->psw
.mask
& PSW_MASK_DAT
)) {
2159 cpu_restore_state(cs
, ra
);
2160 program_interrupt(env
, PGM_SPECIAL_OP
, 6);
2163 /* OAC (operand access control) for the first operand -> dest */
2164 val
= (r0
& 0xffff0000ULL
) >> 16;
2165 dest_key
= (val
>> 12) & 0xf;
2166 dest_as
= (val
>> 6) & 0x3;
2167 dest_k
= (val
>> 1) & 0x1;
2170 /* OAC (operand access control) for the second operand -> src */
2171 val
= (r0
& 0x0000ffffULL
);
2172 src_key
= (val
>> 12) & 0xf;
2173 src_as
= (val
>> 6) & 0x3;
2174 src_k
= (val
>> 1) & 0x1;
2190 if (dest_a
&& dest_as
== AS_HOME
&& (env
->psw
.mask
& PSW_MASK_PSTATE
)) {
2191 cpu_restore_state(cs
, ra
);
2192 program_interrupt(env
, PGM_SPECIAL_OP
, 6);
2194 if (!(env
->cregs
[0] & CR0_SECONDARY
) &&
2195 (dest_as
== AS_SECONDARY
|| src_as
== AS_SECONDARY
)) {
2196 cpu_restore_state(cs
, ra
);
2197 program_interrupt(env
, PGM_SPECIAL_OP
, 6);
2199 if (!psw_key_valid(env
, dest_key
) || !psw_key_valid(env
, src_key
)) {
2200 cpu_restore_state(cs
, ra
);
2201 program_interrupt(env
, PGM_PRIVILEGED
, 6);
2204 len
= wrap_length(env
, len
);
2210 /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
2211 if (src_as
== AS_ACCREG
|| dest_as
== AS_ACCREG
||
2212 (env
->psw
.mask
& PSW_MASK_PSTATE
)) {
2213 qemu_log_mask(LOG_UNIMP
, "%s: AR-mode and PSTATE support missing\n",
2215 cpu_restore_state(cs
, ra
);
2216 program_interrupt(env
, PGM_ADDRESSING
, 6);
2220 * b) Access using correct keys
2223 #ifdef CONFIG_USER_ONLY
2224 /* psw keys are never valid in user mode, we will never reach this */
2225 g_assert_not_reached();
2227 fast_memmove_as(env
, dest
, src
, len
, dest_as
, src_as
, ra
);
2233 /* Decode a Unicode character. A return value < 0 indicates success, storing
2234 the UTF-32 result into OCHAR and the input length into OLEN. A return
2235 value >= 0 indicates failure, and the CC value to be returned. */
2236 typedef int (*decode_unicode_fn
)(CPUS390XState
*env
, uint64_t addr
,
2237 uint64_t ilen
, bool enh_check
, uintptr_t ra
,
2238 uint32_t *ochar
, uint32_t *olen
);
2240 /* Encode a Unicode character. A return value < 0 indicates success, storing
2241 the bytes into ADDR and the output length into OLEN. A return value >= 0
2242 indicates failure, and the CC value to be returned. */
2243 typedef int (*encode_unicode_fn
)(CPUS390XState
*env
, uint64_t addr
,
2244 uint64_t ilen
, uintptr_t ra
, uint32_t c
,
2247 static int decode_utf8(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2248 bool enh_check
, uintptr_t ra
,
2249 uint32_t *ochar
, uint32_t *olen
)
2251 uint8_t s0
, s1
, s2
, s3
;
2257 s0
= cpu_ldub_data_ra(env
, addr
, ra
);
2259 /* one byte character */
2262 } else if (s0
<= (enh_check
? 0xc1 : 0xbf)) {
2263 /* invalid character */
2265 } else if (s0
<= 0xdf) {
2266 /* two byte character */
2271 s1
= cpu_ldub_data_ra(env
, addr
+ 1, ra
);
2273 c
= (c
<< 6) | (s1
& 0x3f);
2274 if (enh_check
&& (s1
& 0xc0) != 0x80) {
2277 } else if (s0
<= 0xef) {
2278 /* three byte character */
2283 s1
= cpu_ldub_data_ra(env
, addr
+ 1, ra
);
2284 s2
= cpu_ldub_data_ra(env
, addr
+ 2, ra
);
2286 c
= (c
<< 6) | (s1
& 0x3f);
2287 c
= (c
<< 6) | (s2
& 0x3f);
2288 /* Fold the byte-by-byte range descriptions in the PoO into
2289 tests against the complete value. It disallows encodings
2290 that could be smaller, and the UTF-16 surrogates. */
2292 && ((s1
& 0xc0) != 0x80
2293 || (s2
& 0xc0) != 0x80
2295 || (c
>= 0xd800 && c
<= 0xdfff))) {
2298 } else if (s0
<= (enh_check
? 0xf4 : 0xf7)) {
2299 /* four byte character */
2304 s1
= cpu_ldub_data_ra(env
, addr
+ 1, ra
);
2305 s2
= cpu_ldub_data_ra(env
, addr
+ 2, ra
);
2306 s3
= cpu_ldub_data_ra(env
, addr
+ 3, ra
);
2308 c
= (c
<< 6) | (s1
& 0x3f);
2309 c
= (c
<< 6) | (s2
& 0x3f);
2310 c
= (c
<< 6) | (s3
& 0x3f);
2313 && ((s1
& 0xc0) != 0x80
2314 || (s2
& 0xc0) != 0x80
2315 || (s3
& 0xc0) != 0x80
2321 /* invalid character */
2330 static int decode_utf16(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2331 bool enh_check
, uintptr_t ra
,
2332 uint32_t *ochar
, uint32_t *olen
)
2340 s0
= cpu_lduw_data_ra(env
, addr
, ra
);
2341 if ((s0
& 0xfc00) != 0xd800) {
2342 /* one word character */
2346 /* two word character */
2351 s1
= cpu_lduw_data_ra(env
, addr
+ 2, ra
);
2352 c
= extract32(s0
, 6, 4) + 1;
2353 c
= (c
<< 6) | (s0
& 0x3f);
2354 c
= (c
<< 10) | (s1
& 0x3ff);
2355 if (enh_check
&& (s1
& 0xfc00) != 0xdc00) {
2356 /* invalid surrogate character */
2366 static int decode_utf32(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2367 bool enh_check
, uintptr_t ra
,
2368 uint32_t *ochar
, uint32_t *olen
)
2375 c
= cpu_ldl_data_ra(env
, addr
, ra
);
2376 if ((c
>= 0xd800 && c
<= 0xdbff) || c
> 0x10ffff) {
2377 /* invalid unicode character */
2386 static int encode_utf8(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2387 uintptr_t ra
, uint32_t c
, uint32_t *olen
)
2393 /* one byte character */
2396 } else if (c
<= 0x7ff) {
2397 /* two byte character */
2399 d
[1] = 0x80 | extract32(c
, 0, 6);
2400 d
[0] = 0xc0 | extract32(c
, 6, 5);
2401 } else if (c
<= 0xffff) {
2402 /* three byte character */
2404 d
[2] = 0x80 | extract32(c
, 0, 6);
2405 d
[1] = 0x80 | extract32(c
, 6, 6);
2406 d
[0] = 0xe0 | extract32(c
, 12, 4);
2408 /* four byte character */
2410 d
[3] = 0x80 | extract32(c
, 0, 6);
2411 d
[2] = 0x80 | extract32(c
, 6, 6);
2412 d
[1] = 0x80 | extract32(c
, 12, 6);
2413 d
[0] = 0xf0 | extract32(c
, 18, 3);
2419 for (i
= 0; i
< l
; ++i
) {
2420 cpu_stb_data_ra(env
, addr
+ i
, d
[i
], ra
);
2427 static int encode_utf16(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2428 uintptr_t ra
, uint32_t c
, uint32_t *olen
)
2433 /* one word character */
2437 cpu_stw_data_ra(env
, addr
, c
, ra
);
2440 /* two word character */
2444 d1
= 0xdc00 | extract32(c
, 0, 10);
2445 d0
= 0xd800 | extract32(c
, 10, 6);
2446 d0
= deposit32(d0
, 6, 4, extract32(c
, 16, 5) - 1);
2447 cpu_stw_data_ra(env
, addr
+ 0, d0
, ra
);
2448 cpu_stw_data_ra(env
, addr
+ 2, d1
, ra
);
2455 static int encode_utf32(CPUS390XState
*env
, uint64_t addr
, uint64_t ilen
,
2456 uintptr_t ra
, uint32_t c
, uint32_t *olen
)
2461 cpu_stl_data_ra(env
, addr
, c
, ra
);
2466 static inline uint32_t convert_unicode(CPUS390XState
*env
, uint32_t r1
,
2467 uint32_t r2
, uint32_t m3
, uintptr_t ra
,
2468 decode_unicode_fn decode
,
2469 encode_unicode_fn encode
)
2471 uint64_t dst
= get_address(env
, r1
);
2472 uint64_t dlen
= get_length(env
, r1
+ 1);
2473 uint64_t src
= get_address(env
, r2
);
2474 uint64_t slen
= get_length(env
, r2
+ 1);
2475 bool enh_check
= m3
& 1;
2478 /* Lest we fail to service interrupts in a timely manner, limit the
2479 amount of work we're willing to do. For now, let's cap at 256. */
2480 for (i
= 0; i
< 256; ++i
) {
2481 uint32_t c
, ilen
, olen
;
2483 cc
= decode(env
, src
, slen
, enh_check
, ra
, &c
, &ilen
);
2484 if (unlikely(cc
>= 0)) {
2487 cc
= encode(env
, dst
, dlen
, ra
, c
, &olen
);
2488 if (unlikely(cc
>= 0)) {
2499 set_address(env
, r1
, dst
);
2500 set_length(env
, r1
+ 1, dlen
);
2501 set_address(env
, r2
, src
);
2502 set_length(env
, r2
+ 1, slen
);
2507 uint32_t HELPER(cu12
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2509 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2510 decode_utf8
, encode_utf16
);
2513 uint32_t HELPER(cu14
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2515 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2516 decode_utf8
, encode_utf32
);
2519 uint32_t HELPER(cu21
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2521 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2522 decode_utf16
, encode_utf8
);
2525 uint32_t HELPER(cu24
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2527 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2528 decode_utf16
, encode_utf32
);
2531 uint32_t HELPER(cu41
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2533 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2534 decode_utf32
, encode_utf8
);
2537 uint32_t HELPER(cu42
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
, uint32_t m3
)
2539 return convert_unicode(env
, r1
, r2
, m3
, GETPC(),
2540 decode_utf32
, encode_utf16
);