/*
 *  S/390 memory access helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/s390x/storage-keys.h"
#endif
/*****************************************************************************/
/* Softmmu support */

/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
#define HELPER_LOG(x...) qemu_log(x)
#else
#define HELPER_LOG(x...)
#endif
static inline bool psw_key_valid(CPUS390XState *env, uint8_t psw_key)
{
    uint16_t pkm = env->cregs[3] >> 16;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        /* PSW key has range 0..15, it is valid if the bit is 1 in the PKM */
        return pkm & (0x80 >> psw_key);
    }
    return true;
}
/* Reduce the length so that addr + len doesn't cross a page boundary.  */
static inline uint32_t adj_len_to_page(uint32_t len, uint64_t addr)
{
#ifndef CONFIG_USER_ONLY
    if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
        return -(addr | TARGET_PAGE_MASK);
    }
#endif
    return len;
}
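/*
 * Illustration (added commentary, not part of the original source),
 * assuming 4K pages: for addr = 0x1ff8 and len = 0x20,
 * (addr & ~TARGET_PAGE_MASK) + len - 1 = 0x1017 reaches past the page
 * boundary, so the function returns -(addr | TARGET_PAGE_MASK) = 0x8,
 * i.e. exactly the number of bytes remaining up to the end of the page.
 */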
/* Trigger a SPECIFICATION exception if an address or a length is not
   naturally aligned.  */
static inline void check_alignment(CPUS390XState *env, uint64_t v,
                                   int wordsize, uintptr_t ra)
{
    if (v % wordsize) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }
}
/* Load a value from memory according to its size.  */
static inline uint64_t cpu_ldusize_data_ra(CPUS390XState *env, uint64_t addr,
                                           int wordsize, uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        return cpu_ldub_data_ra(env, addr, ra);
    case 2:
        return cpu_lduw_data_ra(env, addr, ra);
    default:
        abort();
    }
}

/* Store a value to memory according to its size.  */
static inline void cpu_stsize_data_ra(CPUS390XState *env, uint64_t addr,
                                      uint64_t value, int wordsize,
                                      uintptr_t ra)
{
    switch (wordsize) {
    case 1:
        cpu_stb_data_ra(env, addr, value, ra);
        break;
    case 2:
        cpu_stw_data_ra(env, addr, value, ra);
        break;
    default:
        abort();
    }
}
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted.  */
            uint32_t l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration.  */
            cpu_stb_data_ra(env, dest, byte, ra);
            dest++;
            l--;
        }
    }
}
#ifndef CONFIG_USER_ONLY
static void fast_memmove_idx(CPUS390XState *env, uint64_t dest, uint64_t src,
                             uint32_t len, int dest_idx, int src_idx,
                             uintptr_t ra)
{
    TCGMemOpIdx oi_dest = make_memop_idx(MO_UB, dest_idx);
    TCGMemOpIdx oi_src = make_memop_idx(MO_UB, src_idx);
    uint32_t len_adj;
    void *src_p;
    void *dest_p;
    uint8_t x;

    while (len > 0) {
        src = wrap_address(env, src);
        dest = wrap_address(env, dest);
        src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, src_idx);
        dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, dest_idx);

        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            len_adj = adj_len_to_page(adj_len_to_page(len, src), dest);
            memmove(dest_p, src_p, len_adj);
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            len_adj = 1;
            x = helper_ret_ldub_mmu(env, src, oi_src, ra);
            helper_ret_stb_mmu(env, dest, x, oi_dest, ra);
        }
        src += len_adj;
        dest += len_adj;
        len -= len_adj;
    }
}
static int mmu_idx_from_as(uint8_t as)
{
    switch (as) {
    case AS_PRIMARY:
        return MMU_PRIMARY_IDX;
    case AS_SECONDARY:
        return MMU_SECONDARY_IDX;
    case AS_HOME:
        return MMU_HOME_IDX;
    default:
        /* FIXME AS_ACCREG */
        g_assert_not_reached();
    }
}

static void fast_memmove_as(CPUS390XState *env, uint64_t dest, uint64_t src,
                            uint32_t len, uint8_t dest_as, uint8_t src_as,
                            uintptr_t ra)
{
    int src_idx = mmu_idx_from_as(src_as);
    int dest_idx = mmu_idx_from_as(dest_as);

    fast_memmove_idx(env, dest, src, len, dest_idx, src_idx, ra);
}
#endif
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            uint32_t l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            cpu_stb_data_ra(env, dest, cpu_ldub_data_ra(env, src, ra), ra);
            src++;
            dest++;
            l--;
        }
    }
}
/* and on array */
static uint32_t do_helper_nc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x &= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_nc(env, l, dest, src, GETPC());
}
/* xor on array */
static uint32_t do_helper_xc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* xor with itself is the same as memset(0) */
    if (src == dest) {
        fast_memset(env, dest, 0, l + 1, ra);
        return 0;
    }

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x ^= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_xc(env, l, dest, src, GETPC());
}
/* or on array */
static uint32_t do_helper_oc(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src, uintptr_t ra)
{
    uint32_t i;
    uint8_t c = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
        x |= cpu_ldub_data_ra(env, dest + i, ra);
        c |= x;
        cpu_stb_data_ra(env, dest + i, x, ra);
    }
    return c != 0;
}

uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
                    uint64_t src)
{
    return do_helper_oc(env, l, dest, src, GETPC());
}
/* memmove */
static uint32_t do_helper_mvc(CPUS390XState *env, uint32_t l, uint64_t dest,
                              uint64_t src, uintptr_t ra)
{
    uint32_t i;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc and memmove do not behave the same when areas overlap! */
    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte */
    if (dest == src + 1) {
        fast_memset(env, dest, cpu_ldub_data_ra(env, src, ra), l + 1, ra);
    } else if (dest < src || src + l < dest) {
        fast_memmove(env, dest, src, l + 1, ra);
    } else {
        /* slow version with byte accesses which always work */
        for (i = 0; i <= l; i++) {
            uint8_t x = cpu_ldub_data_ra(env, src + i, ra);
            cpu_stb_data_ra(env, dest + i, x, ra);
        }
    }

    return env->cc_op;
}

void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    do_helper_mvc(env, l, dest, src, GETPC());
}
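/*
 * Worked example (added commentary): MVC copies left to right one byte at
 * a time, so with dest == src + 1 each stored byte becomes the next byte
 * read.  If the byte at src is 'A' and l = 3, the four bytes at src + 1
 * through src + 4 all end up 'A' -- hence the fast_memset() shortcut above.
 */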
/* move inverse  */
void HELPER(mvcin)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, src - i, ra);
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}
/* move numerics  */
void HELPER(mvn)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t v = cpu_ldub_data_ra(env, dest + i, ra) & 0xf0;
        v |= cpu_ldub_data_ra(env, src + i, ra) & 0x0f;
        cpu_stb_data_ra(env, dest + i, v, ra);
    }
}
/* move with offset  */
void HELPER(mvo)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = l >> 4;
    int len_src = l & 0xf;
    uint8_t byte_dest, byte_src;
    int i;

    src += len_src;
    dest += len_dest;

    /* Handle rightmost byte */
    byte_src = cpu_ldub_data_ra(env, src, ra);
    byte_dest = cpu_ldub_data_ra(env, dest, ra);
    byte_dest = (byte_dest & 0x0f) | (byte_src << 4);
    cpu_stb_data_ra(env, dest, byte_dest, ra);

    /* Process remaining bytes from right to left */
    for (i = 1; i <= len_dest; i++) {
        byte_dest = byte_src >> 4;
        if (len_src - i >= 0) {
            byte_src = cpu_ldub_data_ra(env, src - i, ra);
        } else {
            byte_src = 0;
        }
        byte_dest |= byte_src << 4;
        cpu_stb_data_ra(env, dest - i, byte_dest, ra);
    }
}
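/*
 * Worked example (added commentary): for dest = {0x77, 0x88, 0x9C} and
 * src = {0x12, 0x34} (l = 0x21, i.e. len_dest = 2, len_src = 1), the source
 * digits are shifted left by one nibble into the destination while the
 * rightmost destination nibble (the sign) is preserved:
 * result = {0x01, 0x23, 0x4C}.
 */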
/* move zones  */
void HELPER(mvz)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int i;

    for (i = 0; i <= l; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra) & 0x0f;
        b |= cpu_ldub_data_ra(env, src + i, ra) & 0xf0;
        cpu_stb_data_ra(env, dest + i, b, ra);
    }
}
/* compare unsigned byte arrays */
static uint32_t do_helper_clc(CPUS390XState *env, uint32_t l, uint64_t s1,
                              uint64_t s2, uintptr_t ra)
{
    uint32_t i;
    uint32_t cc = 0;

    HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
               __func__, l, s1, s2);

    for (i = 0; i <= l; i++) {
        uint8_t x = cpu_ldub_data_ra(env, s1 + i, ra);
        uint8_t y = cpu_ldub_data_ra(env, s2 + i, ra);
        HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
        if (x < y) {
            cc = 1;
            break;
        } else if (x > y) {
            cc = 2;
            break;
        }
    }

    HELPER_LOG("\n");
    return cc;
}

uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
{
    return do_helper_clc(env, l, s1, s2, GETPC());
}
/* compare logical under mask */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);

    while (mask) {
        if (mask & 8) {
            uint8_t d = cpu_ldub_data_ra(env, addr, ra);
            uint8_t r = extract32(r1, 24, 8);
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }

    HELPER_LOG("\n");
    return cc;
}
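/*
 * Added note: the 4-bit mask selects big-endian bytes of r1, tested from
 * the most significant mask bit down.  E.g. mask = 0b1010 compares bytes 0
 * and 2 of r1 against two successive bytes at addr; each iteration above
 * consumes one mask bit and one r1 byte, but addr only advances for bytes
 * the mask selects.
 */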
static inline uint64_t get_address(CPUS390XState *env, int reg)
{
    return wrap_address(env, env->regs[reg]);
}
static inline void set_address(CPUS390XState *env, int reg, uint64_t address)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = address;
    } else {
        if (!(env->psw.mask & PSW_MASK_32)) {
            /* 24-Bit mode. According to the PoO it is implementation
            dependent if bits 32-39 remain unchanged or are set to
            zeros.  Choose the former so that the function can also be
            used for TRT.  */
            env->regs[reg] = deposit64(env->regs[reg], 0, 24, address);
        } else {
            /* 31-Bit mode. According to the PoO it is implementation
            dependent if bit 32 remains unchanged or is set to zero.
            Choose the latter so that the function can also be used for
            TRT.  */
            address &= 0x7fffffff;
            env->regs[reg] = deposit64(env->regs[reg], 0, 32, address);
        }
    }
}
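/*
 * Illustration (added commentary): in 24-bit mode set_address() replaces
 * only bits 0-23, e.g. regs[reg] = 0x0123456789abcdef with
 * address = 0xfedcba yields 0x0123456789fedcba; in 31-bit mode the low
 * 32 bits become address & 0x7fffffff.
 */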
static inline uint64_t wrap_length(CPUS390XState *env, uint64_t length)
{
    if (!(env->psw.mask & PSW_MASK_64)) {
        /* 24-Bit and 31-Bit mode */
        length &= 0x7fffffff;
    }
    return length;
}

static inline uint64_t get_length(CPUS390XState *env, int reg)
{
    return wrap_length(env, env->regs[reg]);
}
static inline void set_length(CPUS390XState *env, int reg, uint64_t length)
{
    if (env->psw.mask & PSW_MASK_64) {
        /* 64-Bit mode */
        env->regs[reg] = length;
    } else {
        /* 24-Bit and 31-Bit mode */
        env->regs[reg] = deposit64(env->regs[reg], 0, 32, length);
    }
}
/* search string (c is byte to search, r2 is string, r1 end of string) */
void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t end, str;
    uint32_t len;
    uint8_t v, c = env->regs[0];

    /* Bits 32-55 must contain all 0.  */
    if (env->regs[0] & 0xffffff00u) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified.  */
            env->cc_op = 2;
            return;
        }
        v = cpu_ldub_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}
void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint32_t len;
    uint16_t v, c = env->regs[0];
    uint64_t end, str, adj_end;

    /* Bits 32-47 of R0 must be zero.  */
    if (env->regs[0] & 0xffff0000u) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    str = get_address(env, r2);
    end = get_address(env, r1);

    /* If the LSB of the two addresses differ, use one extra byte.  */
    adj_end = end + ((str ^ end) & 1);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; len += 2) {
        if (str + len == adj_end) {
            /* End of input found.  */
            env->cc_op = 2;
            return;
        }
        v = cpu_lduw_data_ra(env, str + len, ra);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified.  */
            env->cc_op = 1;
            set_address(env, r1, str + len);
            return;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process.  */
    env->cc_op = 3;
    set_address(env, r2, str + len);
}
/* unsigned string compare (c is string terminator) */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    s1 = wrap_address(env, s1);
    s2 = wrap_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data_ra(env, s1 + len, ra);
        uint8_t v2 = cpu_ldub_data_ra(env, s2 + len, ra);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers.  */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low".  */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers.  */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}
/* move page */
uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* ??? missing r0 handling, which includes access keys, but more
       importantly optional suppression of the exception!  */
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE, GETPC());
    return 0; /* data moved */
}
/* string copy (c is string terminator) */
uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
{
    uintptr_t ra = GETPC();
    uint32_t len;

    c = c & 0xff;
    d = wrap_address(env, d);
    s = wrap_address(env, s);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v = cpu_ldub_data_ra(env, s + len, ra);
        cpu_stb_data_ra(env, d + len, v, ra);
        if (v == c) {
            /* Complete.  Set CC=1 and advance R1.  */
            env->cc_op = 1;
            env->retxl = s;
            return d + len;
        }
    }

    /* Incomplete.  Set CC=3 and signal to advance R1 and R2.  */
    env->cc_op = 3;
    env->retxl = s + len;
    return d + len;
}
/* load access registers r1 to r3 from memory at a2 */
void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        /* we either came here by lam or lamy, which have different lengths */
        s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        env->aregs[i] = cpu_ldl_data_ra(env, a2, ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}

/* store access registers r1 to r3 in memory at a2 */
void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    int i;

    if (a2 & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, a2, env->aregs[i], ra);
        a2 += 4;

        if (i == r3) {
            break;
        }
    }
}
/* move long helper */
static inline uint32_t do_mvcl(CPUS390XState *env,
                               uint64_t *dest, uint64_t *destlen,
                               uint64_t *src, uint64_t *srclen,
                               uint16_t pad, int wordsize, uintptr_t ra)
{
    uint64_t len = MIN(*srclen, *destlen);
    uint32_t cc;

    if (*destlen == *srclen) {
        cc = 0;
    } else if (*destlen < *srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    /* Copy the src array */
    fast_memmove(env, *dest, *src, len, ra);
    *src += len;
    *srclen -= len;
    *dest += len;
    *destlen -= len;

    /* Pad the remaining area */
    if (wordsize == 1) {
        fast_memset(env, *dest, pad, *destlen, ra);
        *dest += *destlen;
        *destlen = 0;
    } else {
        /* If remaining length is odd, pad with odd byte first.  */
        if (*destlen & 1) {
            cpu_stb_data_ra(env, *dest, pad & 0xff, ra);
            *dest += 1;
            *destlen -= 1;
        }
        /* The remaining length is even, pad using words.  */
        for (; *destlen; *dest += 2, *destlen -= 2) {
            cpu_stw_data_ra(env, *dest, pad, ra);
        }
    }

    return cc;
}
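/*
 * Added note: the CC reflects the operand lengths before any movement
 * (0: equal, 1: destination shorter, 2: destination longer), as the
 * MVCL family defines it; the destination remainder is then filled with
 * the pad byte (wordsize 1) or pad word (wordsize 2, for MVCLU).
 */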
/* move long */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, destlen);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, srclen);
    set_address(env, r1, dest);
    set_address(env, r2, src);

    return cc;
}
/* move long extended */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 1, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}

/* move long unicode */
uint32_t HELPER(mvclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_mvcl(env, &dest, &destlen, &src, &srclen, pad, 2, ra);

    set_length(env, r1 + 1, destlen);
    set_length(env, r3 + 1, srclen);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}
/* compare logical long helper */
static inline uint32_t do_clcl(CPUS390XState *env,
                               uint64_t *src1, uint64_t *src1len,
                               uint64_t *src3, uint64_t *src3len,
                               uint16_t pad, uint64_t limit,
                               int wordsize, uintptr_t ra)
{
    uint64_t len = MAX(*src1len, *src3len);
    uint32_t cc = 0;

    check_alignment(env, *src1len | *src3len, wordsize, ra);

    if (!len) {
        return cc;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  */
    if (len > limit) {
        len = limit;
        cc = 3;
    }

    for (; len; len -= wordsize) {
        uint16_t v1 = pad;
        uint16_t v3 = pad;

        if (*src1len) {
            v1 = cpu_ldusize_data_ra(env, *src1, wordsize, ra);
        }
        if (*src3len) {
            v3 = cpu_ldusize_data_ra(env, *src3, wordsize, ra);
        }

        if (v1 != v3) {
            cc = (v1 < v3) ? 1 : 2;
            break;
        }

        if (*src1len) {
            *src1 += wordsize;
            *src1len -= wordsize;
        }
        if (*src3len) {
            *src3 += wordsize;
            *src3len -= wordsize;
        }
    }

    return cc;
}
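/*
 * Added note: per the CLCL definition, the shorter operand is logically
 * extended with the pad character, which is why v1/v3 start out as "pad"
 * and the address/length updates are skipped once the corresponding
 * length reaches zero.
 */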
/* compare logical long */
uint32_t HELPER(clcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = extract64(env->regs[r1 + 1], 0, 24);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = extract64(env->regs[r2 + 1], 0, 24);
    uint64_t src3 = get_address(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, -1, 1, ra);

    env->regs[r1 + 1] = deposit64(env->regs[r1 + 1], 0, 24, src1len);
    env->regs[r2 + 1] = deposit64(env->regs[r2 + 1], 0, 24, src3len);
    set_address(env, r1, src1);
    set_address(env, r2, src3);

    return cc;
}

/* compare logical long extended memcompare insn with padding */
uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint8_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x2000, 1, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}

/* compare logical long unicode memcompare insn with padding */
uint32_t HELPER(clclu)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t src1len = get_length(env, r1 + 1);
    uint64_t src1 = get_address(env, r1);
    uint64_t src3len = get_length(env, r3 + 1);
    uint64_t src3 = get_address(env, r3);
    uint16_t pad = a2;
    uint32_t cc;

    cc = do_clcl(env, &src1, &src1len, &src3, &src3len, pad, 0x1000, 2, ra);

    set_length(env, r1 + 1, src1len);
    set_length(env, r3 + 1, src3len);
    set_address(env, r1, src1);
    set_address(env, r3, src3);

    return cc;
}
/* checksum */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uintptr_t ra = GETPC();
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available.  */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data_ra(env, src, ra);
    }

    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data_ra(env, src, ra) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data_ra(env, src, ra) << 16;
        cksm += cpu_ldub_data_ra(env, src + 2, ra) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice).  */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything.  */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length.  */
    env->retxl = cksm;
    return len;
}
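/*
 * Worked example (added commentary) for the carry folding: an accumulated
 * value of 0x1_2345_6789 folds to 0x23456789 + 0x1 = 0x2345678a, i.e. a
 * 32-bit end-around-carry sum as CKSM requires.
 */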
void HELPER(pack)(CPUS390XState *env, uint32_t len, uint64_t dest, uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pack every value */
    while (len_dest > 0) {
        b = 0;

        if (len_src >= 0) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src--;
            len_src--;
        }
        if (len_src >= 0) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src--;
            len_src--;
        }

        len_dest--;
        dest--;
        cpu_stb_data_ra(env, dest, b, ra);
    }
}
static inline void do_pkau(CPUS390XState *env, uint64_t dest, uint64_t src,
                           uint32_t srclen, int ssize, uintptr_t ra)
{
    int i;
    /* The destination operand is always 16 bytes long.  */
    const int destlen = 16;

    /* The operands are processed from right to left.  */
    src += srclen - 1;
    dest += destlen - 1;

    for (i = 0; i < destlen; i++) {
        uint8_t b = 0;

        if (i == 0) {
            /* Start with a positive sign */
            b = 0xc;
        } else if (srclen > ssize) {
            b = cpu_ldub_data_ra(env, src, ra) & 0x0f;
            src -= ssize;
            srclen -= ssize;
        }

        if (srclen > ssize) {
            b |= cpu_ldub_data_ra(env, src, ra) << 4;
            src -= ssize;
            srclen -= ssize;
        }

        cpu_stb_data_ra(env, dest, b, ra);
        dest--;
    }
}

void HELPER(pka)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 1, GETPC());
}

void HELPER(pku)(CPUS390XState *env, uint64_t dest, uint64_t src,
                 uint32_t srclen)
{
    do_pkau(env, dest, src, srclen, 2, GETPC());
}
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    uintptr_t ra = GETPC();
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;

    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data_ra(env, src, ra);
    cpu_stb_data_ra(env, dest, (b << 4) | (b >> 4), ra);
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        if (len_src > 0) {
            cur_byte = cpu_ldub_data_ra(env, src, ra);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            src--;
            len_src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data_ra(env, dest, cur_byte, ra);
    }
}
static inline uint32_t do_unpkau(CPUS390XState *env, uint64_t dest,
                                 uint32_t destlen, int dsize, uint64_t src,
                                 uintptr_t ra)
{
    int i;
    uint32_t cc;
    uint8_t b;
    /* The source operand is always 16 bytes long.  */
    const int srclen = 16;

    /* The operands are processed from right to left.  */
    src += srclen - 1;
    dest += destlen - dsize;

    /* Check for the sign.  */
    b = cpu_ldub_data_ra(env, src, ra);
    src--;
    switch (b & 0xf) {
    case 0xa:
    case 0xc:
    case 0xe ... 0xf:
        cc = 0;  /* plus */
        break;
    case 0xb:
    case 0xd:
        cc = 1;  /* minus */
        break;
    default:
        cc = 3;  /* invalid */
        break;
    }

    /* Now pad every nibble with 0x30, advancing one nibble at a time. */
    for (i = 0; i < destlen; i += dsize) {
        if (i == (31 * dsize)) {
            /* If length is 32/64 bytes, the leftmost byte is 0. */
            b = 0;
        } else if (i % (2 * dsize)) {
            b = cpu_ldub_data_ra(env, src, ra);
            src--;
        } else {
            b >>= 4;
        }
        cpu_stsize_data_ra(env, dest, 0x30 + (b & 0xf), dsize, ra);
        dest -= dsize;
    }

    return cc;
}

uint32_t HELPER(unpka)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 1, src, GETPC());
}

uint32_t HELPER(unpku)(CPUS390XState *env, uint64_t dest, uint32_t destlen,
                       uint64_t src)
{
    return do_unpkau(env, dest, destlen, 2, src, GETPC());
}
uint32_t HELPER(tp)(CPUS390XState *env, uint64_t dest, uint32_t destlen)
{
    uintptr_t ra = GETPC();
    uint32_t cc = 0;
    int i;

    for (i = 0; i < destlen; i++) {
        uint8_t b = cpu_ldub_data_ra(env, dest + i, ra);
        /* digit */
        cc |= (b & 0xf0) > 0x90 ? 2 : 0;

        if (i == (destlen - 1)) {
            /* sign */
            cc |= (b & 0xf) < 0xa ? 1 : 0;
        } else {
            /* digit */
            cc |= (b & 0xf) > 0x9 ? 2 : 0;
        }
    }

    return cc;
}
static uint32_t do_helper_tr(CPUS390XState *env, uint32_t len, uint64_t array,
                             uint64_t trans, uintptr_t ra)
{
    uint32_t i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i, ra);
        uint8_t new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    return env->cc_op;
}

void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
                uint64_t trans)
{
    do_helper_tr(env, len, array, trans, GETPC());
}
uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uintptr_t ra = GETPC();
    uint8_t end = env->regs[0] & 0xff;
    uint64_t l = len;
    uint64_t i;
    uint32_t cc = 0;

    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k.  */
    if (l > 0x2000) {
        l = 0x2000;
        cc = 3;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data_ra(env, array + i, ra);

        if (byte == end) {
            cc = 1;
            break;
        }

        new_byte = cpu_ldub_data_ra(env, trans + byte, ra);
        cpu_stb_data_ra(env, array + i, new_byte, ra);
    }

    env->cc_op = cc;
    env->retxl = len - i;
    return array + i;
}
static inline uint32_t do_helper_trt(CPUS390XState *env, int len,
                                     uint64_t array, uint64_t trans,
                                     int inc, uintptr_t ra)
{
    int i;

    for (i = 0; i <= len; i++) {
        uint8_t byte = cpu_ldub_data_ra(env, array + i * inc, ra);
        uint8_t sbyte = cpu_ldub_data_ra(env, trans + byte, ra);

        if (sbyte != 0) {
            set_address(env, 1, array + i * inc);
            env->regs[2] = deposit64(env->regs[2], 0, 8, sbyte);
            return (i == len) ? 2 : 1;
        }
    }

    return 0;
}

static uint32_t do_helper_trt_fwd(CPUS390XState *env, uint32_t len,
                                  uint64_t array, uint64_t trans,
                                  uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, 1, ra);
}

uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
                     uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, 1, GETPC());
}

static uint32_t do_helper_trt_bkwd(CPUS390XState *env, uint32_t len,
                                   uint64_t array, uint64_t trans,
                                   uintptr_t ra)
{
    return do_helper_trt(env, len, array, trans, -1, ra);
}

uint32_t HELPER(trtr)(CPUS390XState *env, uint32_t len, uint64_t array,
                      uint64_t trans)
{
    return do_helper_trt(env, len, array, trans, -1, GETPC());
}

/* Translate one/two to one/two */
uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2,
                      uint32_t tst, uint32_t sizes)
{
    uintptr_t ra = GETPC();
    int dsize = (sizes & 1) ? 1 : 2;
    int ssize = (sizes & 2) ? 1 : 2;
    uint64_t tbl = get_address(env, 1);
    uint64_t dst = get_address(env, r1);
    uint64_t len = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint32_t cc = 3;
    int i;

    /* The lower address bits of TBL are ignored.  For TROO, TROT, it's
       the low 3 bits (double-word aligned).  For TRTO, TRTT, it's either
       the low 12 bits (4K, without ETF2-ENH) or 3 bits (with ETF2-ENH).  */
    if (ssize == 2 && !s390_has_feat(S390_FEAT_ETF2_ENH)) {
        tbl &= -4096;
    } else {
        tbl &= -8;
    }

    check_alignment(env, len, ssize, ra);

    /* Lest we fail to service interrupts in a timely manner, */
    /* limit the amount of work we're willing to do.   */
    for (i = 0; i < 0x2000; i++) {
        uint16_t sval = cpu_ldusize_data_ra(env, src, ssize, ra);
        uint64_t tble = tbl + (sval * dsize);
        uint16_t dval = cpu_ldusize_data_ra(env, tble, dsize, ra);

        if (dval == tst) {
            cc = 1;
            break;
        }
        cpu_stsize_data_ra(env, dst, dval, dsize, ra);

        len -= ssize;
        src += ssize;
        dst += dsize;

        if (len == 0) {
            cc = 0;
            break;
        }
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, len);
    set_address(env, r2, src);

    return cc;
}
void HELPER(cdsg)(CPUS390XState *env, uint64_t addr,
                  uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    Int128 oldv;
    uint64_t oldh, oldl;
    bool fail;

    check_alignment(env, addr, 16, ra);

    oldh = cpu_ldq_data_ra(env, addr + 0, ra);
    oldl = cpu_ldq_data_ra(env, addr + 8, ra);

    oldv = int128_make128(oldl, oldh);
    fail = !int128_eq(oldv, cmpv);
    if (fail) {
        newv = oldv;
    }

    cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra);
    cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra);

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}
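/*
 * Added note: this is the serial (non-parallel) CDSG path, so the 128-bit
 * compare-and-swap can be emulated with two 64-bit loads and stores; the
 * _parallel variant below must use an actual atomic cmpxchg instead.
 */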
void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr,
                           uint32_t r1, uint32_t r3)
{
    uintptr_t ra = GETPC();
    Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]);
    Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 oldv;
    bool fail;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
    fail = !int128_eq(oldv, cmpv);

    env->cc_op = fail;
    env->regs[r1] = int128_gethi(oldv);
    env->regs[r1 + 1] = int128_getlo(oldv);
}
static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
                        uint64_t a2, bool parallel)
{
    uint32_t mem_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    uint32_t fc = extract32(env->regs[0], 0, 8);
    uint32_t sc = extract32(env->regs[0], 8, 8);
    uint64_t pl = get_address(env, 1) & -16;
    uint64_t svh, svl;
    uint32_t cc;

    /* Sanity check the function code and storage characteristic.  */
    if (fc > 1 || sc > 3) {
        if (!s390_has_feat(S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2)) {
            goto spec_exception;
        }
        if (fc > 2 || sc > 4 || (fc == 2 && (r3 & 1))) {
            goto spec_exception;
        }
    }

    /* Sanity check the alignments.  */
    if (extract32(a1, 0, fc + 2) || extract32(a2, 0, sc)) {
        goto spec_exception;
    }

    /* Sanity check writability of the store address.  */
#ifndef CONFIG_USER_ONLY
    probe_write(env, a2, 0, mem_idx, ra);
#endif

    /*
     * Note that the compare-and-swap is atomic, and the store is atomic,
     * but the complete operation is not.  Therefore we do not need to
     * assert serial context in order to implement this.  That said,
     * restart early if we can't support either operation that is supposed
     * to be atomic.
     */
    if (parallel) {
        uint32_t max = 2;
#ifdef CONFIG_ATOMIC64
        max = 3;
#endif
        if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) ||
            (HAVE_ATOMIC128  ? 0 : sc > max)) {
            cpu_loop_exit_atomic(env_cpu(env), ra);
        }
    }

    /* All loads happen before all stores.  For simplicity, load the entire
       store value area from the parameter list.  */
    svh = cpu_ldq_data_ra(env, pl + 16, ra);
    svl = cpu_ldq_data_ra(env, pl + 24, ra);

    switch (fc) {
    case 0:
        {
            uint32_t nv = cpu_ldl_data_ra(env, pl, ra);
            uint32_t cv = env->regs[r3];
            uint32_t ov;

            if (parallel) {
#ifdef CONFIG_USER_ONLY
                uint32_t *haddr = g2h(a1);
                ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
#else
                TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra);
#endif
            } else {
                ov = cpu_ldl_data_ra(env, a1, ra);
                cpu_stl_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = deposit64(env->regs[r3], 32, 32, ov);
        }
        break;

    case 1:
        {
            uint64_t nv = cpu_ldq_data_ra(env, pl, ra);
            uint64_t cv = env->regs[r3];
            uint64_t ov;

            if (parallel) {
#ifdef CONFIG_ATOMIC64
# ifdef CONFIG_USER_ONLY
                uint64_t *haddr = g2h(a1);
                ov = atomic_cmpxchg__nocheck(haddr, cv, nv);
# else
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
                ov = helper_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
# endif
#else
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
#endif
            } else {
                ov = cpu_ldq_data_ra(env, a1, ra);
                cpu_stq_data_ra(env, a1, (ov == cv ? nv : ov), ra);
            }
            cc = (ov != cv);
            env->regs[r3] = ov;
        }
        break;

    case 2:
        {
            uint64_t nvh = cpu_ldq_data_ra(env, pl, ra);
            uint64_t nvl = cpu_ldq_data_ra(env, pl + 8, ra);
            Int128 nv = int128_make128(nvl, nvh);
            Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]);
            Int128 ov;

            if (!parallel) {
                uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra);
                uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra);

                ov = int128_make128(ol, oh);
                cc = !int128_eq(ov, cv);
                if (cc) {
                    nv = ov;
                }

                cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra);
                cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra);
            } else if (HAVE_CMPXCHG128) {
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra);
                cc = !int128_eq(ov, cv);
            } else {
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
            }

            env->regs[r3 + 0] = int128_gethi(ov);
            env->regs[r3 + 1] = int128_getlo(ov);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* Store only if the comparison succeeded.  Note that above we use a pair
       of 64-bit big-endian loads, so for sc < 3 we must extract the value
       from the most-significant bits of svh.  */
    if (cc == 0) {
        switch (sc) {
        case 0:
            cpu_stb_data_ra(env, a2, svh >> 56, ra);
            break;
        case 1:
            cpu_stw_data_ra(env, a2, svh >> 48, ra);
            break;
        case 2:
            cpu_stl_data_ra(env, a2, svh >> 32, ra);
            break;
        case 3:
            cpu_stq_data_ra(env, a2, svh, ra);
            break;
        case 4:
            if (!parallel) {
                cpu_stq_data_ra(env, a2 + 0, svh, ra);
                cpu_stq_data_ra(env, a2 + 8, svl, ra);
            } else if (HAVE_ATOMIC128) {
                TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
                Int128 sv = int128_make128(svl, svh);
                helper_atomic_sto_be_mmu(env, a2, sv, oi, ra);
            } else {
                /* Note that we asserted !parallel above.  */
                g_assert_not_reached();
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    return cc;

 spec_exception:
    s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    g_assert_not_reached();
}
uint32_t HELPER(csst)(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2)
{
    return do_csst(env, r3, a1, a2, false);
}

uint32_t HELPER(csst_parallel)(CPUS390XState *env, uint32_t r3, uint64_t a1,
                               uint64_t a2)
{
    return do_csst(env, r3, a1, a2, true);
}
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint64_t val = cpu_ldq_data_ra(env, src, ra);
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, val);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    tlb_flush(env_cpu(env));
}

void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    bool PERchanged = false;
    uint64_t src = a2;
    uint32_t i;

    if (src & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        uint32_t val = cpu_ldl_data_ra(env, src, ra);
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = deposit64(env->cregs[i], 0, 32, val);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%x\n", i, src, val);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(env_cpu(env));
    }

    tlb_flush(env_cpu(env));
}
void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stq_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t dest = a2;
    uint32_t i;

    if (dest & 0x3) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    for (i = r1;; i = (i + 1) % 16) {
        cpu_stl_data_ra(env, dest, env->cregs[i], ra);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}
uint32_t HELPER(testblock)(CPUS390XState *env, uint64_t real_addr)
{
    uintptr_t ra = GETPC();
    int i;

    real_addr = wrap_address(env, real_addr) & TARGET_PAGE_MASK;

    for (i = 0; i < TARGET_PAGE_SIZE; i += 8) {
        cpu_stq_real_ra(env, real_addr + i, 0, ra);
    }

    return 0;
}
uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2)
{
    S390CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    /*
     * TODO: we currently don't handle all access protection types
     * (including access-list and key-controlled) as well as AR mode.
     */
    if (!s390_cpu_virt_mem_check_write(cpu, a1, 0, 1)) {
        /* Fetching permitted; storing permitted */
        return 0;
    }

    if (env->int_pgm_code == PGM_PROTECTION) {
        /* retry if reading is possible */
        cs->exception_index = 0;
        if (!s390_cpu_virt_mem_check_read(cpu, a1, 0, 1)) {
            /* Fetching permitted; storing not permitted */
            return 1;
        }
    }

    switch (env->int_pgm_code) {
    case PGM_PROTECTION:
        /* Fetching not permitted; storing not permitted */
        cs->exception_index = 0;
        return 2;
    case PGM_ADDRESSING:
    case PGM_TRANS_SPEC:
        /* exceptions forwarded to the guest */
        s390_cpu_virt_mem_handle_exc(cpu, GETPC());
        return 0;
    }

    /* Translation not available */
    cs->exception_index = 0;
    return 3;
}
/* insert storage key extended */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}

/* set storage key extended */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = wrap_address(env, r2);
    uint8_t key;

    if (addr > ram_size) {
        return;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
}

/* reset reference bit extended */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    if (r2 > ram_size) {
        return 0;
    }

    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}

uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    uintptr_t ra = GETPC();
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        uint8_t x = cpu_ldub_primary_ra(env, a2 + i, ra);
        cpu_stb_secondary_ra(env, a1 + i, x, ra);
    }

    return cc;
}

uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
    uintptr_t ra = GETPC();
    int cc = 0, i;

    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __func__, l, a1, a2);

    if (l > 256) {
        /* max 256 */
        l = 256;
        cc = 3;
    }

    /* XXX replace w/ memcpy */
    for (i = 0; i < l; i++) {
        uint8_t x = cpu_ldub_secondary_ra(env, a2 + i, ra);
        cpu_stb_primary_ra(env, a1 + i, x, ra);
    }

    return cc;
}
void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
{
    CPUState *cs = env_cpu(env);
    const uintptr_t ra = GETPC();
    uint64_t table, entry, raddr;
    uint16_t entries, i, index = 0;

    if (r2 & 0xff000) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
    }

    if (!(r2 & 0x800)) {
        /* invalidation-and-clearing operation */
        table = r1 & ASCE_ORIGIN;
        entries = (r2 & 0x7ff) + 1;

        switch (r1 & ASCE_TYPE_MASK) {
        case ASCE_TYPE_REGION1:
            index = (r2 >> 53) & 0x7ff;
            break;
        case ASCE_TYPE_REGION2:
            index = (r2 >> 42) & 0x7ff;
            break;
        case ASCE_TYPE_REGION3:
            index = (r2 >> 31) & 0x7ff;
            break;
        case ASCE_TYPE_SEGMENT:
            index = (r2 >> 20) & 0x7ff;
            break;
        }
        for (i = 0; i < entries; i++) {
            /* addresses are not wrapped in 24/31bit mode but table index is */
            raddr = table + ((index + i) & 0x7ff) * sizeof(entry);
            entry = cpu_ldq_real_ra(env, raddr, ra);
            if (!(entry & REGION_ENTRY_INV)) {
                /* we are allowed to not store if already invalid */
                entry |= REGION_ENTRY_INV;
                cpu_stq_real_ra(env, raddr, entry, ra);
            }
        }
    }

    /* We simply flush the complete tlb, therefore we can ignore r3. */
    if (m4 & 1) {
        tlb_flush(cs);
    } else {
        tlb_flush_all_cpus_synced(cs);
    }
}
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr,
                  uint32_t m4)
{
    CPUState *cs = env_cpu(env);
    const uintptr_t ra = GETPC();
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte_addr, pte;

    /* Compute the page table entry address */
    pte_addr = (pto & SEGMENT_ENTRY_ORIGIN);
    pte_addr += (vaddr & VADDR_PX) >> 9;

    /* Mark the page table entry as invalid */
    pte = cpu_ldq_real_ra(env, pte_addr, ra);
    pte |= PAGE_INVALID;
    cpu_stq_real_ra(env, pte_addr, pte, ra);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    if (m4 & 1) {
        if (vaddr & ~VADDR_PX) {
            tlb_flush_page(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush(cs);
        }
    } else {
        if (vaddr & ~VADDR_PX) {
            tlb_flush_page_all_cpus_synced(cs, page);
            /* XXX 31-bit hack */
            tlb_flush_page_all_cpus_synced(cs, page ^ 0x80000000);
        } else {
            /* looks like we don't have a valid virtual address */
            tlb_flush_all_cpus_synced(cs);
        }
    }
}

/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
    tlb_flush(env_cpu(env));
}

/* flush global tlb */
void HELPER(purge)(CPUS390XState *env)
{
    tlb_flush_all_cpus_synced(env_cpu(env));
}
/* load using real address */
uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
{
    return cpu_ldl_real_ra(env, wrap_address(env, addr), GETPC());
}

uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
{
    return cpu_ldq_real_ra(env, wrap_address(env, addr), GETPC());
}

/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    cpu_stl_real_ra(env, wrap_address(env, addr), (uint32_t)v1, GETPC());

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}

void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    cpu_stq_real_ra(env, wrap_address(env, addr), v1, GETPC());

    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper.  */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = env_cpu(env);
    uint32_t cc = 0;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int old_exc, flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 2, GETPC());
    }

    old_exc = cs->exception_index;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
#endif
/* load pair from quadword */
uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;

    check_alignment(env, addr, 16, ra);
    hi = cpu_ldq_data_ra(env, addr + 0, ra);
    lo = cpu_ldq_data_ra(env, addr + 8, ra);

    env->retxl = lo;
    return hi;
}

uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr)
{
    uintptr_t ra = GETPC();
    uint64_t hi, lo;
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 v;

    assert(HAVE_ATOMIC128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    v = helper_atomic_ldo_be_mmu(env, addr, oi, ra);
    hi = int128_gethi(v);
    lo = int128_getlo(v);

    env->retxl = lo;
    return hi;
}

/* store pair to quadword */
void HELPER(stpq)(CPUS390XState *env, uint64_t addr,
                  uint64_t low, uint64_t high)
{
    uintptr_t ra = GETPC();

    check_alignment(env, addr, 16, ra);
    cpu_stq_data_ra(env, addr + 0, high, ra);
    cpu_stq_data_ra(env, addr + 8, low, ra);
}

void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr,
                           uint64_t low, uint64_t high)
{
    uintptr_t ra = GETPC();
    int mem_idx;
    TCGMemOpIdx oi;
    Int128 v;

    assert(HAVE_ATOMIC128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
    v = int128_make128(low, high);
    helper_atomic_sto_be_mmu(env, addr, v, oi, ra);
}
/* Execute instruction.  This instruction executes an insn modified with
   the contents of r1.  It does not change the executed instruction in memory;
   it does not change the program counter.

   Perform this by recording the modified instruction in env->ex_value.
   This will be noticed by cpu_get_tb_cpu_state and thus tb translation.  */
void HELPER(ex)(CPUS390XState *env, uint32_t ilen, uint64_t r1, uint64_t addr)
{
    uint64_t insn = cpu_lduw_code(env, addr);
    uint8_t opc = insn >> 8;

    /* Or in the contents of R1[56:63].  */
    insn |= r1 & 0xff;

    /* Load the rest of the instruction.  */
    insn <<= 48;
    switch (get_ilen(opc)) {
    case 2:
        break;
    case 4:
        insn |= (uint64_t)cpu_lduw_code(env, addr + 2) << 32;
        break;
    case 6:
        insn |= (uint64_t)(uint32_t)cpu_ldl_code(env, addr + 2) << 16;
        break;
    default:
        g_assert_not_reached();
    }

    /* The very most common cases can be sped up by avoiding a new TB.  */
    if ((opc & 0xf0) == 0xd0) {
        typedef uint32_t (*dx_helper)(CPUS390XState *, uint32_t, uint64_t,
                                      uint64_t, uintptr_t);
        static const dx_helper dx[16] = {
            [0x0] = do_helper_trt_bkwd,
            [0x2] = do_helper_mvc,
            [0x4] = do_helper_nc,
            [0x5] = do_helper_clc,
            [0x6] = do_helper_oc,
            [0x7] = do_helper_xc,
            [0xc] = do_helper_tr,
            [0xd] = do_helper_trt_fwd,
        };
        dx_helper helper = dx[opc & 0xf];

        if (helper) {
            uint32_t l = extract64(insn, 48, 8);
            uint32_t b1 = extract64(insn, 44, 4);
            uint32_t d1 = extract64(insn, 32, 12);
            uint32_t b2 = extract64(insn, 28, 4);
            uint32_t d2 = extract64(insn, 16, 12);
            uint64_t a1 = wrap_address(env, env->regs[b1] + d1);
            uint64_t a2 = wrap_address(env, env->regs[b2] + d2);

            env->cc_op = helper(env, l, a1, a2, 0);
            env->psw.addr += ilen;
            return;
        }
    } else if (opc == 0x0a) {
        env->int_svc_code = extract64(insn, 48, 8);
        env->int_svc_ilen = ilen;
        helper_exception(env, EXCP_SVC);
        g_assert_not_reached();
    }

    /* Record the insn we want to execute as well as the ilen to use
       during the execution of the target insn.  This will also ensure
       that ex_value is non-zero, which flags that we are in a state
       that requires such execution.  */
    env->ex_value = insn | ilen;
}
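/*
 * Added note: since the (at most 6-byte) target instruction was shifted
 * into bits 16-63 above, the low bits of ex_value are free to carry the
 * ilen of the EXECUTE instruction itself, and ex_value is guaranteed
 * non-zero because ilen is 2, 4 or 6.
 */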
uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
                       uint64_t len)
{
    const uint8_t psw_key = (env->psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY;
    const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
    const uint64_t r0 = env->regs[0];
    const uintptr_t ra = GETPC();
    uint8_t dest_key, dest_as, dest_k, dest_a;
    uint8_t src_key, src_as, src_k, src_a;
    uint64_t val;
    int cc = 0;

    HELPER_LOG("%s dest %" PRIx64 ", src %" PRIx64 ", len %" PRIx64 "\n",
               __func__, dest, src, len);

    if (!(env->psw.mask & PSW_MASK_DAT)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }

    /* OAC (operand access control) for the first operand -> dest */
    val = (r0 & 0xffff0000ULL) >> 16;
    dest_key = (val >> 12) & 0xf;
    dest_as = (val >> 6) & 0x3;
    dest_k = (val >> 1) & 0x1;
    dest_a = val & 0x1;

    /* OAC (operand access control) for the second operand -> src */
    val = (r0 & 0x0000ffffULL);
    src_key = (val >> 12) & 0xf;
    src_as = (val >> 6) & 0x3;
    src_k = (val >> 1) & 0x1;
    src_a = val & 0x1;

    if (!dest_k) {
        dest_key = psw_key;
    }
    if (!src_k) {
        src_key = psw_key;
    }
    if (!dest_a) {
        dest_as = psw_as;
    }
    if (!src_a) {
        src_as = psw_as;
    }

    if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
    if (!(env->cregs[0] & CR0_SECONDARY) &&
        (dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
        s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
    }
    if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
    }

    len = wrap_length(env, len);
    if (len > 4096) {
        cc = 3;
        len = 4096;
    }

    /* FIXME: AR-mode and proper problem state mode (using PSW keys) missing */
    if (src_as == AS_ACCREG || dest_as == AS_ACCREG ||
        (env->psw.mask & PSW_MASK_PSTATE)) {
        qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
                      __func__);
        s390_program_interrupt(env, PGM_ADDRESSING, 6, ra);
    }

    /* FIXME: a) LAP
     *        b) Access using correct keys
     *        c) AR-mode
     */
#ifdef CONFIG_USER_ONLY
    /* psw keys are never valid in user mode, we will never reach this */
    g_assert_not_reached();
#else
    fast_memmove_as(env, dest, src, len, dest_as, src_as, ra);
#endif

    return cc;
}
/* Decode a Unicode character.  A return value < 0 indicates success, storing
   the UTF-32 result into OCHAR and the input length into OLEN.  A return
   value >= 0 indicates failure, and the CC value to be returned.  */
typedef int (*decode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, bool enh_check, uintptr_t ra,
                                 uint32_t *ochar, uint32_t *olen);

/* Encode a Unicode character.  A return value < 0 indicates success, storing
   the bytes into ADDR and the output length into OLEN.  A return value >= 0
   indicates failure, and the CC value to be returned.  */
typedef int (*encode_unicode_fn)(CPUS390XState *env, uint64_t addr,
                                 uint64_t ilen, uintptr_t ra, uint32_t c,
                                 uint32_t *olen);

static int decode_utf8(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                       bool enh_check, uintptr_t ra,
                       uint32_t *ochar, uint32_t *olen)
{
    uint8_t s0, s1, s2, s3;
    uint32_t c, l;

    if (ilen < 1) {
        return 0;
    }
    s0 = cpu_ldub_data_ra(env, addr, ra);
    if (s0 <= 0x7f) {
        /* one byte character */
        l = 1;
        c = s0;
    } else if (s0 <= (enh_check ? 0xc1 : 0xbf)) {
        /* invalid character */
        return 2;
    } else if (s0 <= 0xdf) {
        /* two byte character */
        l = 2;
        c = s0 & 0x1f;
        if (ilen < 2) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        c = (c << 6) | (s1 & 0x3f);
        if (enh_check && (s1 & 0xc0) != 0x80) {
            return 2;
        }
    } else if (s0 <= 0xef) {
        /* three byte character */
        l = 3;
        c = s0 & 0x0f;
        if (ilen < 3) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        /* Fold the byte-by-byte range descriptions in the PoO into
           tests against the complete value.  It disallows encodings
           that could be smaller, and the UTF-16 surrogates.  */
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || c < 0x800
                || (c >= 0xd800 && c <= 0xdfff))) {
            return 2;
        }
    } else if (s0 <= (enh_check ? 0xf4 : 0xf7)) {
        /* four byte character */
        l = 4;
        c = s0 & 0x07;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_ldub_data_ra(env, addr + 1, ra);
        s2 = cpu_ldub_data_ra(env, addr + 2, ra);
        s3 = cpu_ldub_data_ra(env, addr + 3, ra);
        c = (c << 6) | (s1 & 0x3f);
        c = (c << 6) | (s2 & 0x3f);
        c = (c << 6) | (s3 & 0x3f);
        if (enh_check
            && ((s1 & 0xc0) != 0x80
                || (s2 & 0xc0) != 0x80
                || (s3 & 0xc0) != 0x80
                || c < 0x10000
                || c > 0x10ffff)) {
            return 2;
        }
    } else {
        /* invalid character */
        return 2;
    }

    *ochar = c;
    *olen = l;
    return -1;
}
static int decode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint16_t s0, s1;
    uint32_t c, l;

    if (ilen < 2) {
        return 0;
    }
    s0 = cpu_lduw_data_ra(env, addr, ra);
    if ((s0 & 0xfc00) != 0xd800) {
        /* one word character */
        l = 2;
        c = s0;
    } else {
        /* two word character */
        l = 4;
        if (ilen < 4) {
            return 0;
        }
        s1 = cpu_lduw_data_ra(env, addr + 2, ra);
        c = extract32(s0, 6, 4) + 1;
        c = (c << 6) | (s0 & 0x3f);
        c = (c << 10) | (s1 & 0x3ff);
        if (enh_check && (s1 & 0xfc00) != 0xdc00) {
            /* invalid surrogate character */
            return 2;
        }
    }

    *ochar = c;
    *olen = l;
    return -1;
}

static int decode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        bool enh_check, uintptr_t ra,
                        uint32_t *ochar, uint32_t *olen)
{
    uint32_t c;

    if (ilen < 4) {
        return 0;
    }
    c = cpu_ldl_data_ra(env, addr, ra);
    if ((c >= 0xd800 && c <= 0xdbff) || c > 0x10ffff) {
        /* invalid unicode character */
        return 2;
    }

    *ochar = c;
    *olen = 4;
    return -1;
}
*env
, uint64_t addr
, uint64_t ilen
,
2449 uintptr_t ra
, uint32_t c
, uint32_t *olen
)
2455 /* one byte character */
2458 } else if (c
<= 0x7ff) {
2459 /* two byte character */
2461 d
[1] = 0x80 | extract32(c
, 0, 6);
2462 d
[0] = 0xc0 | extract32(c
, 6, 5);
2463 } else if (c
<= 0xffff) {
2464 /* three byte character */
2466 d
[2] = 0x80 | extract32(c
, 0, 6);
2467 d
[1] = 0x80 | extract32(c
, 6, 6);
2468 d
[0] = 0xe0 | extract32(c
, 12, 4);
2470 /* four byte character */
2472 d
[3] = 0x80 | extract32(c
, 0, 6);
2473 d
[2] = 0x80 | extract32(c
, 6, 6);
2474 d
[1] = 0x80 | extract32(c
, 12, 6);
2475 d
[0] = 0xf0 | extract32(c
, 18, 3);
2481 for (i
= 0; i
< l
; ++i
) {
2482 cpu_stb_data_ra(env
, addr
+ i
, d
[i
], ra
);
static int encode_utf16(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    uint16_t d0, d1;

    if (c <= 0xffff) {
        /* one word character */
        if (ilen < 2) {
            return 1;
        }
        cpu_stw_data_ra(env, addr, c, ra);
        *olen = 2;
    } else {
        /* two word character */
        if (ilen < 4) {
            return 1;
        }
        d1 = 0xdc00 | extract32(c, 0, 10);
        d0 = 0xd800 | extract32(c, 10, 6);
        d0 = deposit32(d0, 6, 4, extract32(c, 16, 5) - 1);
        cpu_stw_data_ra(env, addr + 0, d0, ra);
        cpu_stw_data_ra(env, addr + 2, d1, ra);
        *olen = 4;
    }

    return -1;
}
static int encode_utf32(CPUS390XState *env, uint64_t addr, uint64_t ilen,
                        uintptr_t ra, uint32_t c, uint32_t *olen)
{
    if (ilen < 4) {
        return 1;
    }
    cpu_stl_data_ra(env, addr, c, ra);
    *olen = 4;
    return -1;
}

static inline uint32_t convert_unicode(CPUS390XState *env, uint32_t r1,
                                       uint32_t r2, uint32_t m3, uintptr_t ra,
                                       decode_unicode_fn decode,
                                       encode_unicode_fn encode)
{
    uint64_t dst = get_address(env, r1);
    uint64_t dlen = get_length(env, r1 + 1);
    uint64_t src = get_address(env, r2);
    uint64_t slen = get_length(env, r2 + 1);
    bool enh_check = m3 & 1;
    int i, cc;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 256.  */
    for (i = 0; i < 256; ++i) {
        uint32_t c, ilen, olen;

        cc = decode(env, src, slen, enh_check, ra, &c, &ilen);
        if (unlikely(cc >= 0)) {
            break;
        }
        cc = encode(env, dst, dlen, ra, c, &olen);
        if (unlikely(cc >= 0)) {
            break;
        }

        src += ilen;
        slen -= ilen;
        dst += olen;
        dlen -= olen;
        cc = 3;
    }

    set_address(env, r1, dst);
    set_length(env, r1 + 1, dlen);
    set_address(env, r2, src);
    set_length(env, r2 + 1, slen);

    return cc;
}
uint32_t HELPER(cu12)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf16);
}

uint32_t HELPER(cu14)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf8, encode_utf32);
}

uint32_t HELPER(cu21)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf8);
}

uint32_t HELPER(cu24)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf16, encode_utf32);
}

uint32_t HELPER(cu41)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf8);
}

uint32_t HELPER(cu42)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t m3)
{
    return convert_unicode(env, r1, r2, m3, GETPC(),
                           decode_utf32, encode_utf16);
}
void probe_write_access(CPUS390XState *env, uint64_t addr, uint64_t len,
                        uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    if (!h2g_valid(addr) || !h2g_valid(addr + len - 1) ||
        page_check_range(addr, len, PAGE_WRITE) < 0) {
        s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
    }
#else
    /* test the actual access, not just any access to the page due to LAP */
    while (len) {
        const uint64_t pagelen = -(addr | TARGET_PAGE_MASK);
        const uint64_t curlen = MIN(pagelen, len);

        probe_write(env, addr, curlen, cpu_mmu_index(env, false), ra);
        addr = wrap_address(env, addr + curlen);
        len -= curlen;
    }
#endif
}

void HELPER(probe_write_access)(CPUS390XState *env, uint64_t addr, uint64_t len)
{
    probe_write_access(env, addr, len, GETPC());
}