/*
 * S/390 memory access helper routines
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2009 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
23 #include "exec/address-spaces.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "qemu/int128.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/s390x/storage-keys.h"
33 /*****************************************************************************/
35 #if !defined(CONFIG_USER_ONLY)
37 /* try to fill the TLB and return an exception if error. If retaddr is
38 NULL, it means that the function was called in C code (i.e. not
39 from generated code or from helper.c) */
40 /* XXX: fix it to restore all registers */
41 void tlb_fill(CPUState
*cs
, target_ulong addr
, MMUAccessType access_type
,
42 int mmu_idx
, uintptr_t retaddr
)
44 int ret
= s390_cpu_handle_mmu_fault(cs
, addr
, access_type
, mmu_idx
);
45 if (unlikely(ret
!= 0)) {
46 cpu_loop_exit_restore(cs
, retaddr
);
52 /* #define DEBUG_HELPER */
54 #define HELPER_LOG(x...) qemu_log(x)
56 #define HELPER_LOG(x...)
59 /* Reduce the length so that addr + len doesn't cross a page boundary. */
60 static inline uint32_t adj_len_to_page(uint32_t len
, uint64_t addr
)
62 #ifndef CONFIG_USER_ONLY
63 if ((addr
& ~TARGET_PAGE_MASK
) + len
- 1 >= TARGET_PAGE_SIZE
) {
64 return -(addr
| TARGET_PAGE_MASK
);
70 /* Trigger a SPECIFICATION exception if an address or a length is not
72 static inline void check_alignment(CPUS390XState
*env
, uint64_t v
,
73 int wordsize
, uintptr_t ra
)
76 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
77 cpu_restore_state(cs
, ra
);
78 program_interrupt(env
, PGM_SPECIFICATION
, 6);
82 /* Load a value from memory according to its size. */
83 static inline uint64_t cpu_ldusize_data_ra(CPUS390XState
*env
, uint64_t addr
,
84 int wordsize
, uintptr_t ra
)
88 return cpu_ldub_data_ra(env
, addr
, ra
);
90 return cpu_lduw_data_ra(env
, addr
, ra
);
96 /* Store a to memory according to its size. */
97 static inline void cpu_stsize_data_ra(CPUS390XState
*env
, uint64_t addr
,
98 uint64_t value
, int wordsize
,
103 cpu_stb_data_ra(env
, addr
, value
, ra
);
106 cpu_stw_data_ra(env
, addr
, value
, ra
);
113 static void fast_memset(CPUS390XState
*env
, uint64_t dest
, uint8_t byte
,
114 uint32_t l
, uintptr_t ra
)
116 int mmu_idx
= cpu_mmu_index(env
, false);
119 void *p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, mmu_idx
);
121 /* Access to the whole page in write mode granted. */
122 uint32_t l_adj
= adj_len_to_page(l
, dest
);
123 memset(p
, byte
, l_adj
);
127 /* We failed to get access to the whole page. The next write
128 access will likely fill the QEMU TLB for the next iteration. */
129 cpu_stb_data_ra(env
, dest
, byte
, ra
);
136 static void fast_memmove(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
137 uint32_t l
, uintptr_t ra
)
139 int mmu_idx
= cpu_mmu_index(env
, false);
142 void *src_p
= tlb_vaddr_to_host(env
, src
, MMU_DATA_LOAD
, mmu_idx
);
143 void *dest_p
= tlb_vaddr_to_host(env
, dest
, MMU_DATA_STORE
, mmu_idx
);
144 if (src_p
&& dest_p
) {
145 /* Access to both whole pages granted. */
146 uint32_t l_adj
= adj_len_to_page(l
, src
);
147 l_adj
= adj_len_to_page(l_adj
, dest
);
148 memmove(dest_p
, src_p
, l_adj
);
153 /* We failed to get access to one or both whole pages. The next
154 read or write access will likely fill the QEMU TLB for the
156 cpu_stb_data_ra(env
, dest
, cpu_ldub_data_ra(env
, src
, ra
), ra
);
165 static uint32_t do_helper_nc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
166 uint64_t src
, uintptr_t ra
)
171 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
172 __func__
, l
, dest
, src
);
174 for (i
= 0; i
<= l
; i
++) {
175 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
176 x
&= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
178 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
183 uint32_t HELPER(nc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
186 return do_helper_nc(env
, l
, dest
, src
, GETPC());
190 static uint32_t do_helper_xc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
191 uint64_t src
, uintptr_t ra
)
196 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
197 __func__
, l
, dest
, src
);
199 /* xor with itself is the same as memset(0) */
201 fast_memset(env
, dest
, 0, l
+ 1, ra
);
205 for (i
= 0; i
<= l
; i
++) {
206 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
207 x
^= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
209 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
214 uint32_t HELPER(xc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
217 return do_helper_xc(env
, l
, dest
, src
, GETPC());
221 static uint32_t do_helper_oc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
222 uint64_t src
, uintptr_t ra
)
227 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
228 __func__
, l
, dest
, src
);
230 for (i
= 0; i
<= l
; i
++) {
231 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
232 x
|= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
234 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
239 uint32_t HELPER(oc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
242 return do_helper_oc(env
, l
, dest
, src
, GETPC());
246 static uint32_t do_helper_mvc(CPUS390XState
*env
, uint32_t l
, uint64_t dest
,
247 uint64_t src
, uintptr_t ra
)
251 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
252 __func__
, l
, dest
, src
);
254 /* mvc and memmove do not behave the same when areas overlap! */
255 /* mvc with source pointing to the byte after the destination is the
256 same as memset with the first source byte */
257 if (dest
== src
+ 1) {
258 fast_memset(env
, dest
, cpu_ldub_data_ra(env
, src
, ra
), l
+ 1, ra
);
259 } else if (dest
< src
|| src
+ l
< dest
) {
260 fast_memmove(env
, dest
, src
, l
+ 1, ra
);
262 /* slow version with byte accesses which always work */
263 for (i
= 0; i
<= l
; i
++) {
264 uint8_t x
= cpu_ldub_data_ra(env
, src
+ i
, ra
);
265 cpu_stb_data_ra(env
, dest
+ i
, x
, ra
);
272 void HELPER(mvc
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
274 do_helper_mvc(env
, l
, dest
, src
, GETPC());
278 void HELPER(mvcin
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
280 uintptr_t ra
= GETPC();
283 for (i
= 0; i
<= l
; i
++) {
284 uint8_t v
= cpu_ldub_data_ra(env
, src
- i
, ra
);
285 cpu_stb_data_ra(env
, dest
+ i
, v
, ra
);
290 void HELPER(mvn
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
292 uintptr_t ra
= GETPC();
295 for (i
= 0; i
<= l
; i
++) {
296 uint8_t v
= cpu_ldub_data_ra(env
, dest
+ i
, ra
) & 0xf0;
297 v
|= cpu_ldub_data_ra(env
, src
+ i
, ra
) & 0x0f;
298 cpu_stb_data_ra(env
, dest
+ i
, v
, ra
);
302 /* move with offset */
303 void HELPER(mvo
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
305 uintptr_t ra
= GETPC();
306 int len_dest
= l
>> 4;
307 int len_src
= l
& 0xf;
308 uint8_t byte_dest
, byte_src
;
314 /* Handle rightmost byte */
315 byte_src
= cpu_ldub_data_ra(env
, src
, ra
);
316 byte_dest
= cpu_ldub_data_ra(env
, dest
, ra
);
317 byte_dest
= (byte_dest
& 0x0f) | (byte_src
<< 4);
318 cpu_stb_data_ra(env
, dest
, byte_dest
, ra
);
320 /* Process remaining bytes from right to left */
321 for (i
= 1; i
<= len_dest
; i
++) {
322 byte_dest
= byte_src
>> 4;
323 if (len_src
- i
>= 0) {
324 byte_src
= cpu_ldub_data_ra(env
, src
- i
, ra
);
328 byte_dest
|= byte_src
<< 4;
329 cpu_stb_data_ra(env
, dest
- i
, byte_dest
, ra
);
334 void HELPER(mvz
)(CPUS390XState
*env
, uint32_t l
, uint64_t dest
, uint64_t src
)
336 uintptr_t ra
= GETPC();
339 for (i
= 0; i
<= l
; i
++) {
340 uint8_t b
= cpu_ldub_data_ra(env
, dest
+ i
, ra
) & 0x0f;
341 b
|= cpu_ldub_data_ra(env
, src
+ i
, ra
) & 0xf0;
342 cpu_stb_data_ra(env
, dest
+ i
, b
, ra
);
346 /* compare unsigned byte arrays */
347 static uint32_t do_helper_clc(CPUS390XState
*env
, uint32_t l
, uint64_t s1
,
348 uint64_t s2
, uintptr_t ra
)
353 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
354 __func__
, l
, s1
, s2
);
356 for (i
= 0; i
<= l
; i
++) {
357 uint8_t x
= cpu_ldub_data_ra(env
, s1
+ i
, ra
);
358 uint8_t y
= cpu_ldub_data_ra(env
, s2
+ i
, ra
);
359 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
373 uint32_t HELPER(clc
)(CPUS390XState
*env
, uint32_t l
, uint64_t s1
, uint64_t s2
)
375 return do_helper_clc(env
, l
, s1
, s2
, GETPC());
378 /* compare logical under mask */
379 uint32_t HELPER(clm
)(CPUS390XState
*env
, uint32_t r1
, uint32_t mask
,
382 uintptr_t ra
= GETPC();
385 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __func__
, r1
,
390 uint8_t d
= cpu_ldub_data_ra(env
, addr
, ra
);
391 uint8_t r
= extract32(r1
, 24, 8);
392 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
403 mask
= (mask
<< 1) & 0xf;
411 static inline uint64_t wrap_address(CPUS390XState
*env
, uint64_t a
)
413 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
414 if (!(env
->psw
.mask
& PSW_MASK_32
)) {
425 static inline uint64_t get_address(CPUS390XState
*env
, int reg
)
427 return wrap_address(env
, env
->regs
[reg
]);
430 static inline void set_address(CPUS390XState
*env
, int reg
, uint64_t address
)
432 if (env
->psw
.mask
& PSW_MASK_64
) {
434 env
->regs
[reg
] = address
;
436 if (!(env
->psw
.mask
& PSW_MASK_32
)) {
437 /* 24-Bit mode. According to the PoO it is implementation
438 dependent if bits 32-39 remain unchanged or are set to
439 zeros. Choose the former so that the function can also be
441 env
->regs
[reg
] = deposit64(env
->regs
[reg
], 0, 24, address
);
443 /* 31-Bit mode. According to the PoO it is implementation
444 dependent if bit 32 remains unchanged or is set to zero.
445 Choose the latter so that the function can also be used for
447 address
&= 0x7fffffff;
448 env
->regs
[reg
] = deposit64(env
->regs
[reg
], 0, 32, address
);
453 static inline uint64_t wrap_length(CPUS390XState
*env
, uint64_t length
)
455 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
456 /* 24-Bit and 31-Bit mode */
457 length
&= 0x7fffffff;
462 static inline uint64_t get_length(CPUS390XState
*env
, int reg
)
464 return wrap_length(env
, env
->regs
[reg
]);
467 static inline void set_length(CPUS390XState
*env
, int reg
, uint64_t length
)
469 if (env
->psw
.mask
& PSW_MASK_64
) {
471 env
->regs
[reg
] = length
;
473 /* 24-Bit and 31-Bit mode */
474 env
->regs
[reg
] = deposit64(env
->regs
[reg
], 0, 32, length
);
478 /* search string (c is byte to search, r2 is string, r1 end of string) */
479 uint64_t HELPER(srst
)(CPUS390XState
*env
, uint64_t r0
, uint64_t end
,
482 uintptr_t ra
= GETPC();
486 str
= wrap_address(env
, str
);
487 end
= wrap_address(env
, end
);
489 /* Assume for now that R2 is unmodified. */
492 /* Lest we fail to service interrupts in a timely manner, limit the
493 amount of work we're willing to do. For now, let's cap at 8k. */
494 for (len
= 0; len
< 0x2000; ++len
) {
495 if (str
+ len
== end
) {
496 /* Character not found. R1 & R2 are unmodified. */
500 v
= cpu_ldub_data_ra(env
, str
+ len
, ra
);
502 /* Character found. Set R1 to the location; R2 is unmodified. */
508 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
509 env
->retxl
= str
+ len
;
514 /* unsigned string compare (c is string terminator) */
515 uint64_t HELPER(clst
)(CPUS390XState
*env
, uint64_t c
, uint64_t s1
, uint64_t s2
)
517 uintptr_t ra
= GETPC();
521 s1
= wrap_address(env
, s1
);
522 s2
= wrap_address(env
, s2
);
524 /* Lest we fail to service interrupts in a timely manner, limit the
525 amount of work we're willing to do. For now, let's cap at 8k. */
526 for (len
= 0; len
< 0x2000; ++len
) {
527 uint8_t v1
= cpu_ldub_data_ra(env
, s1
+ len
, ra
);
528 uint8_t v2
= cpu_ldub_data_ra(env
, s2
+ len
, ra
);
531 /* Equal. CC=0, and don't advance the registers. */
537 /* Unequal. CC={1,2}, and advance the registers. Note that
538 the terminator need not be zero, but the string that contains
539 the terminator is by definition "low". */
540 env
->cc_op
= (v1
== c
? 1 : v2
== c
? 2 : v1
< v2
? 1 : 2);
541 env
->retxl
= s2
+ len
;
546 /* CPU-determined bytes equal; advance the registers. */
548 env
->retxl
= s2
+ len
;
553 uint32_t HELPER(mvpg
)(CPUS390XState
*env
, uint64_t r0
, uint64_t r1
, uint64_t r2
)
555 /* ??? missing r0 handling, which includes access keys, but more
556 importantly optional suppression of the exception! */
557 fast_memmove(env
, r1
, r2
, TARGET_PAGE_SIZE
, GETPC());
558 return 0; /* data moved */
561 /* string copy (c is string terminator) */
562 uint64_t HELPER(mvst
)(CPUS390XState
*env
, uint64_t c
, uint64_t d
, uint64_t s
)
564 uintptr_t ra
= GETPC();
568 d
= wrap_address(env
, d
);
569 s
= wrap_address(env
, s
);
571 /* Lest we fail to service interrupts in a timely manner, limit the
572 amount of work we're willing to do. For now, let's cap at 8k. */
573 for (len
= 0; len
< 0x2000; ++len
) {
574 uint8_t v
= cpu_ldub_data_ra(env
, s
+ len
, ra
);
575 cpu_stb_data_ra(env
, d
+ len
, v
, ra
);
577 /* Complete. Set CC=1 and advance R1. */
584 /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
586 env
->retxl
= s
+ len
;
590 /* load access registers r1 to r3 from memory at a2 */
591 void HELPER(lam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
593 uintptr_t ra
= GETPC();
596 for (i
= r1
;; i
= (i
+ 1) % 16) {
597 env
->aregs
[i
] = cpu_ldl_data_ra(env
, a2
, ra
);
606 /* store access registers r1 to r3 in memory at a2 */
607 void HELPER(stam
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
609 uintptr_t ra
= GETPC();
612 for (i
= r1
;; i
= (i
+ 1) % 16) {
613 cpu_stl_data_ra(env
, a2
, env
->aregs
[i
], ra
);
622 /* move long helper */
623 static inline uint32_t do_mvcl(CPUS390XState
*env
,
624 uint64_t *dest
, uint64_t *destlen
,
625 uint64_t *src
, uint64_t *srclen
,
626 uint16_t pad
, int wordsize
, uintptr_t ra
)
628 uint64_t len
= MIN(*srclen
, *destlen
);
631 if (*destlen
== *srclen
) {
633 } else if (*destlen
< *srclen
) {
639 /* Copy the src array */
640 fast_memmove(env
, *dest
, *src
, len
, ra
);
646 /* Pad the remaining area */
648 fast_memset(env
, *dest
, pad
, *destlen
, ra
);
652 /* If remaining length is odd, pad with odd byte first. */
654 cpu_stb_data_ra(env
, *dest
, pad
& 0xff, ra
);
658 /* The remaining length is even, pad using words. */
659 for (; *destlen
; *dest
+= 2, *destlen
-= 2) {
660 cpu_stw_data_ra(env
, *dest
, pad
, ra
);
668 uint32_t HELPER(mvcl
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
670 uintptr_t ra
= GETPC();
671 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
672 uint64_t dest
= get_address(env
, r1
);
673 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
674 uint64_t src
= get_address(env
, r2
);
675 uint8_t pad
= env
->regs
[r2
+ 1] >> 24;
678 cc
= do_mvcl(env
, &dest
, &destlen
, &src
, &srclen
, pad
, 1, ra
);
680 env
->regs
[r1
+ 1] = deposit64(env
->regs
[r1
+ 1], 0, 24, destlen
);
681 env
->regs
[r2
+ 1] = deposit64(env
->regs
[r2
+ 1], 0, 24, srclen
);
682 set_address(env
, r1
, dest
);
683 set_address(env
, r2
, src
);
688 /* move long extended */
689 uint32_t HELPER(mvcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
692 uintptr_t ra
= GETPC();
693 uint64_t destlen
= get_length(env
, r1
+ 1);
694 uint64_t dest
= get_address(env
, r1
);
695 uint64_t srclen
= get_length(env
, r3
+ 1);
696 uint64_t src
= get_address(env
, r3
);
700 cc
= do_mvcl(env
, &dest
, &destlen
, &src
, &srclen
, pad
, 1, ra
);
702 set_length(env
, r1
+ 1, destlen
);
703 set_length(env
, r3
+ 1, srclen
);
704 set_address(env
, r1
, dest
);
705 set_address(env
, r3
, src
);
710 /* move long unicode */
711 uint32_t HELPER(mvclu
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
714 uintptr_t ra
= GETPC();
715 uint64_t destlen
= get_length(env
, r1
+ 1);
716 uint64_t dest
= get_address(env
, r1
);
717 uint64_t srclen
= get_length(env
, r3
+ 1);
718 uint64_t src
= get_address(env
, r3
);
722 cc
= do_mvcl(env
, &dest
, &destlen
, &src
, &srclen
, pad
, 2, ra
);
724 set_length(env
, r1
+ 1, destlen
);
725 set_length(env
, r3
+ 1, srclen
);
726 set_address(env
, r1
, dest
);
727 set_address(env
, r3
, src
);
732 /* compare logical long helper */
733 static inline uint32_t do_clcl(CPUS390XState
*env
,
734 uint64_t *src1
, uint64_t *src1len
,
735 uint64_t *src3
, uint64_t *src3len
,
736 uint16_t pad
, uint64_t limit
,
737 int wordsize
, uintptr_t ra
)
739 uint64_t len
= MAX(*src1len
, *src3len
);
742 check_alignment(env
, *src1len
| *src3len
, wordsize
, ra
);
748 /* Lest we fail to service interrupts in a timely manner, limit the
749 amount of work we're willing to do. */
755 for (; len
; len
-= wordsize
) {
760 v1
= cpu_ldusize_data_ra(env
, *src1
, wordsize
, ra
);
763 v3
= cpu_ldusize_data_ra(env
, *src3
, wordsize
, ra
);
767 cc
= (v1
< v3
) ? 1 : 2;
773 *src1len
-= wordsize
;
777 *src3len
-= wordsize
;
785 /* compare logical long */
786 uint32_t HELPER(clcl
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
)
788 uintptr_t ra
= GETPC();
789 uint64_t src1len
= extract64(env
->regs
[r1
+ 1], 0, 24);
790 uint64_t src1
= get_address(env
, r1
);
791 uint64_t src3len
= extract64(env
->regs
[r2
+ 1], 0, 24);
792 uint64_t src3
= get_address(env
, r2
);
793 uint8_t pad
= env
->regs
[r2
+ 1] >> 24;
796 cc
= do_clcl(env
, &src1
, &src1len
, &src3
, &src3len
, pad
, -1, 1, ra
);
798 env
->regs
[r1
+ 1] = deposit64(env
->regs
[r1
+ 1], 0, 24, src1len
);
799 env
->regs
[r2
+ 1] = deposit64(env
->regs
[r2
+ 1], 0, 24, src3len
);
800 set_address(env
, r1
, src1
);
801 set_address(env
, r2
, src3
);
806 /* compare logical long extended memcompare insn with padding */
807 uint32_t HELPER(clcle
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
810 uintptr_t ra
= GETPC();
811 uint64_t src1len
= get_length(env
, r1
+ 1);
812 uint64_t src1
= get_address(env
, r1
);
813 uint64_t src3len
= get_length(env
, r3
+ 1);
814 uint64_t src3
= get_address(env
, r3
);
818 cc
= do_clcl(env
, &src1
, &src1len
, &src3
, &src3len
, pad
, 0x2000, 1, ra
);
820 set_length(env
, r1
+ 1, src1len
);
821 set_length(env
, r3
+ 1, src3len
);
822 set_address(env
, r1
, src1
);
823 set_address(env
, r3
, src3
);
828 /* compare logical long unicode memcompare insn with padding */
829 uint32_t HELPER(clclu
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
,
832 uintptr_t ra
= GETPC();
833 uint64_t src1len
= get_length(env
, r1
+ 1);
834 uint64_t src1
= get_address(env
, r1
);
835 uint64_t src3len
= get_length(env
, r3
+ 1);
836 uint64_t src3
= get_address(env
, r3
);
840 cc
= do_clcl(env
, &src1
, &src1len
, &src3
, &src3len
, pad
, 0x1000, 2, ra
);
842 set_length(env
, r1
+ 1, src1len
);
843 set_length(env
, r3
+ 1, src3len
);
844 set_address(env
, r1
, src1
);
845 set_address(env
, r3
, src3
);
851 uint64_t HELPER(cksm
)(CPUS390XState
*env
, uint64_t r1
,
852 uint64_t src
, uint64_t src_len
)
854 uintptr_t ra
= GETPC();
855 uint64_t max_len
, len
;
856 uint64_t cksm
= (uint32_t)r1
;
858 /* Lest we fail to service interrupts in a timely manner, limit the
859 amount of work we're willing to do. For now, let's cap at 8k. */
860 max_len
= (src_len
> 0x2000 ? 0x2000 : src_len
);
862 /* Process full words as available. */
863 for (len
= 0; len
+ 4 <= max_len
; len
+= 4, src
+= 4) {
864 cksm
+= (uint32_t)cpu_ldl_data_ra(env
, src
, ra
);
867 switch (max_len
- len
) {
869 cksm
+= cpu_ldub_data_ra(env
, src
, ra
) << 24;
873 cksm
+= cpu_lduw_data_ra(env
, src
, ra
) << 16;
877 cksm
+= cpu_lduw_data_ra(env
, src
, ra
) << 16;
878 cksm
+= cpu_ldub_data_ra(env
, src
+ 2, ra
) << 8;
883 /* Fold the carry from the checksum. Note that we can see carry-out
884 during folding more than once (but probably not more than twice). */
885 while (cksm
> 0xffffffffull
) {
886 cksm
= (uint32_t)cksm
+ (cksm
>> 32);
889 /* Indicate whether or not we've processed everything. */
890 env
->cc_op
= (len
== src_len
? 0 : 3);
892 /* Return both cksm and processed length. */
897 void HELPER(pack
)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
, uint64_t src
)
899 uintptr_t ra
= GETPC();
900 int len_dest
= len
>> 4;
901 int len_src
= len
& 0xf;
907 /* last byte is special, it only flips the nibbles */
908 b
= cpu_ldub_data_ra(env
, src
, ra
);
909 cpu_stb_data_ra(env
, dest
, (b
<< 4) | (b
>> 4), ra
);
913 /* now pack every value */
914 while (len_dest
>= 0) {
918 b
= cpu_ldub_data_ra(env
, src
, ra
) & 0x0f;
923 b
|= cpu_ldub_data_ra(env
, src
, ra
) << 4;
930 cpu_stb_data_ra(env
, dest
, b
, ra
);
934 static inline void do_pkau(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
935 uint32_t srclen
, int ssize
, uintptr_t ra
)
938 /* The destination operand is always 16 bytes long. */
939 const int destlen
= 16;
941 /* The operands are processed from right to left. */
945 for (i
= 0; i
< destlen
; i
++) {
948 /* Start with a positive sign */
951 } else if (srclen
> ssize
) {
952 b
= cpu_ldub_data_ra(env
, src
, ra
) & 0x0f;
957 if (srclen
> ssize
) {
958 b
|= cpu_ldub_data_ra(env
, src
, ra
) << 4;
963 cpu_stb_data_ra(env
, dest
, b
, ra
);
969 void HELPER(pka
)(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
972 do_pkau(env
, dest
, src
, srclen
, 1, GETPC());
975 void HELPER(pku
)(CPUS390XState
*env
, uint64_t dest
, uint64_t src
,
978 do_pkau(env
, dest
, src
, srclen
, 2, GETPC());
981 void HELPER(unpk
)(CPUS390XState
*env
, uint32_t len
, uint64_t dest
,
984 uintptr_t ra
= GETPC();
985 int len_dest
= len
>> 4;
986 int len_src
= len
& 0xf;
988 int second_nibble
= 0;
993 /* last byte is special, it only flips the nibbles */
994 b
= cpu_ldub_data_ra(env
, src
, ra
);
995 cpu_stb_data_ra(env
, dest
, (b
<< 4) | (b
>> 4), ra
);
999 /* now pad every nibble with 0xf0 */
1001 while (len_dest
> 0) {
1002 uint8_t cur_byte
= 0;
1005 cur_byte
= cpu_ldub_data_ra(env
, src
, ra
);
1011 /* only advance one nibble at a time */
1012 if (second_nibble
) {
1017 second_nibble
= !second_nibble
;
1020 cur_byte
= (cur_byte
& 0xf);
1024 cpu_stb_data_ra(env
, dest
, cur_byte
, ra
);
1028 static inline uint32_t do_unpkau(CPUS390XState
*env
, uint64_t dest
,
1029 uint32_t destlen
, int dsize
, uint64_t src
,
1035 /* The source operand is always 16 bytes long. */
1036 const int srclen
= 16;
1038 /* The operands are processed from right to left. */
1040 dest
+= destlen
- dsize
;
1042 /* Check for the sign. */
1043 b
= cpu_ldub_data_ra(env
, src
, ra
);
1057 cc
= 3; /* invalid */
1061 /* Now pad every nibble with 0x30, advancing one nibble at a time. */
1062 for (i
= 0; i
< destlen
; i
+= dsize
) {
1063 if (i
== (31 * dsize
)) {
1064 /* If length is 32/64 bytes, the leftmost byte is 0. */
1066 } else if (i
% (2 * dsize
)) {
1067 b
= cpu_ldub_data_ra(env
, src
, ra
);
1072 cpu_stsize_data_ra(env
, dest
, 0x30 + (b
& 0xf), dsize
, ra
);
1079 uint32_t HELPER(unpka
)(CPUS390XState
*env
, uint64_t dest
, uint32_t destlen
,
1082 return do_unpkau(env
, dest
, destlen
, 1, src
, GETPC());
1085 uint32_t HELPER(unpku
)(CPUS390XState
*env
, uint64_t dest
, uint32_t destlen
,
1088 return do_unpkau(env
, dest
, destlen
, 2, src
, GETPC());
1091 uint32_t HELPER(tp
)(CPUS390XState
*env
, uint64_t dest
, uint32_t destlen
)
1093 uintptr_t ra
= GETPC();
1097 for (i
= 0; i
< destlen
; i
++) {
1098 uint8_t b
= cpu_ldub_data_ra(env
, dest
+ i
, ra
);
1100 cc
|= (b
& 0xf0) > 0x90 ? 2 : 0;
1102 if (i
== (destlen
- 1)) {
1104 cc
|= (b
& 0xf) < 0xa ? 1 : 0;
1107 cc
|= (b
& 0xf) > 0x9 ? 2 : 0;
1114 static uint32_t do_helper_tr(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1115 uint64_t trans
, uintptr_t ra
)
1119 for (i
= 0; i
<= len
; i
++) {
1120 uint8_t byte
= cpu_ldub_data_ra(env
, array
+ i
, ra
);
1121 uint8_t new_byte
= cpu_ldub_data_ra(env
, trans
+ byte
, ra
);
1122 cpu_stb_data_ra(env
, array
+ i
, new_byte
, ra
);
1128 void HELPER(tr
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1131 do_helper_tr(env
, len
, array
, trans
, GETPC());
1134 uint64_t HELPER(tre
)(CPUS390XState
*env
, uint64_t array
,
1135 uint64_t len
, uint64_t trans
)
1137 uintptr_t ra
= GETPC();
1138 uint8_t end
= env
->regs
[0] & 0xff;
1143 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
1144 array
&= 0x7fffffff;
1148 /* Lest we fail to service interrupts in a timely manner, limit the
1149 amount of work we're willing to do. For now, let's cap at 8k. */
1155 for (i
= 0; i
< l
; i
++) {
1156 uint8_t byte
, new_byte
;
1158 byte
= cpu_ldub_data_ra(env
, array
+ i
, ra
);
1165 new_byte
= cpu_ldub_data_ra(env
, trans
+ byte
, ra
);
1166 cpu_stb_data_ra(env
, array
+ i
, new_byte
, ra
);
1170 env
->retxl
= len
- i
;
1174 static uint32_t do_helper_trt(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1175 uint64_t trans
, uintptr_t ra
)
1179 for (i
= 0; i
<= len
; i
++) {
1180 uint8_t byte
= cpu_ldub_data_ra(env
, array
+ i
, ra
);
1181 uint8_t sbyte
= cpu_ldub_data_ra(env
, trans
+ byte
, ra
);
1184 set_address(env
, 1, array
+ i
);
1185 env
->regs
[2] = deposit64(env
->regs
[2], 0, 8, sbyte
);
1186 return (i
== len
) ? 2 : 1;
1193 uint32_t HELPER(trt
)(CPUS390XState
*env
, uint32_t len
, uint64_t array
,
1196 return do_helper_trt(env
, len
, array
, trans
, GETPC());
1199 /* Translate one/two to one/two */
1200 uint32_t HELPER(trXX
)(CPUS390XState
*env
, uint32_t r1
, uint32_t r2
,
1201 uint32_t tst
, uint32_t sizes
)
1203 uintptr_t ra
= GETPC();
1204 int dsize
= (sizes
& 1) ? 1 : 2;
1205 int ssize
= (sizes
& 2) ? 1 : 2;
1206 uint64_t tbl
= get_address(env
, 1) & ~7;
1207 uint64_t dst
= get_address(env
, r1
);
1208 uint64_t len
= get_length(env
, r1
+ 1);
1209 uint64_t src
= get_address(env
, r2
);
1213 check_alignment(env
, len
, ssize
, ra
);
1215 /* Lest we fail to service interrupts in a timely manner, */
1216 /* limit the amount of work we're willing to do. */
1217 for (i
= 0; i
< 0x2000; i
++) {
1218 uint16_t sval
= cpu_ldusize_data_ra(env
, src
, ssize
, ra
);
1219 uint64_t tble
= tbl
+ (sval
* dsize
);
1220 uint16_t dval
= cpu_ldusize_data_ra(env
, tble
, dsize
, ra
);
1225 cpu_stsize_data_ra(env
, dst
, dval
, dsize
, ra
);
1237 set_address(env
, r1
, dst
);
1238 set_length(env
, r1
+ 1, len
);
1239 set_address(env
, r2
, src
);
1244 void HELPER(cdsg
)(CPUS390XState
*env
, uint64_t addr
,
1245 uint32_t r1
, uint32_t r3
)
1247 uintptr_t ra
= GETPC();
1248 Int128 cmpv
= int128_make128(env
->regs
[r1
+ 1], env
->regs
[r1
]);
1249 Int128 newv
= int128_make128(env
->regs
[r3
+ 1], env
->regs
[r3
]);
1253 if (parallel_cpus
) {
1254 #ifndef CONFIG_ATOMIC128
1255 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
1257 int mem_idx
= cpu_mmu_index(env
, false);
1258 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
1259 oldv
= helper_atomic_cmpxchgo_be_mmu(env
, addr
, cmpv
, newv
, oi
, ra
);
1260 fail
= !int128_eq(oldv
, cmpv
);
1263 uint64_t oldh
, oldl
;
1265 check_alignment(env
, addr
, 16, ra
);
1267 oldh
= cpu_ldq_data_ra(env
, addr
+ 0, ra
);
1268 oldl
= cpu_ldq_data_ra(env
, addr
+ 8, ra
);
1270 oldv
= int128_make128(oldl
, oldh
);
1271 fail
= !int128_eq(oldv
, cmpv
);
1276 cpu_stq_data_ra(env
, addr
+ 0, int128_gethi(newv
), ra
);
1277 cpu_stq_data_ra(env
, addr
+ 8, int128_getlo(newv
), ra
);
1281 env
->regs
[r1
] = int128_gethi(oldv
);
1282 env
->regs
[r1
+ 1] = int128_getlo(oldv
);
1285 #if !defined(CONFIG_USER_ONLY)
1286 void HELPER(lctlg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1288 uintptr_t ra
= GETPC();
1289 S390CPU
*cpu
= s390_env_get_cpu(env
);
1290 bool PERchanged
= false;
1294 for (i
= r1
;; i
= (i
+ 1) % 16) {
1295 uint64_t val
= cpu_ldq_data_ra(env
, src
, ra
);
1296 if (env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
1299 env
->cregs
[i
] = val
;
1300 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
1302 src
+= sizeof(uint64_t);
1309 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
1310 s390_cpu_recompute_watchpoints(CPU(cpu
));
1313 tlb_flush(CPU(cpu
));
1316 void HELPER(lctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1318 uintptr_t ra
= GETPC();
1319 S390CPU
*cpu
= s390_env_get_cpu(env
);
1320 bool PERchanged
= false;
1324 for (i
= r1
;; i
= (i
+ 1) % 16) {
1325 uint32_t val
= cpu_ldl_data_ra(env
, src
, ra
);
1326 if ((uint32_t)env
->cregs
[i
] != val
&& i
>= 9 && i
<= 11) {
1329 env
->cregs
[i
] = deposit64(env
->cregs
[i
], 0, 32, val
);
1330 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%x\n", i
, src
, val
);
1331 src
+= sizeof(uint32_t);
1338 if (PERchanged
&& env
->psw
.mask
& PSW_MASK_PER
) {
1339 s390_cpu_recompute_watchpoints(CPU(cpu
));
1342 tlb_flush(CPU(cpu
));
1345 void HELPER(stctg
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1347 uintptr_t ra
= GETPC();
1351 for (i
= r1
;; i
= (i
+ 1) % 16) {
1352 cpu_stq_data_ra(env
, dest
, env
->cregs
[i
], ra
);
1353 dest
+= sizeof(uint64_t);
1361 void HELPER(stctl
)(CPUS390XState
*env
, uint32_t r1
, uint64_t a2
, uint32_t r3
)
1363 uintptr_t ra
= GETPC();
1367 for (i
= r1
;; i
= (i
+ 1) % 16) {
1368 cpu_stl_data_ra(env
, dest
, env
->cregs
[i
], ra
);
1369 dest
+= sizeof(uint32_t);
1377 uint32_t HELPER(testblock
)(CPUS390XState
*env
, uint64_t real_addr
)
1379 uintptr_t ra
= GETPC();
1380 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1384 real_addr
= wrap_address(env
, real_addr
);
1385 abs_addr
= mmu_real2abs(env
, real_addr
) & TARGET_PAGE_MASK
;
1386 if (!address_space_access_valid(&address_space_memory
, abs_addr
,
1387 TARGET_PAGE_SIZE
, true)) {
1388 cpu_restore_state(cs
, ra
);
1389 program_interrupt(env
, PGM_ADDRESSING
, 4);
1393 /* Check low-address protection */
1394 if ((env
->cregs
[0] & CR0_LOWPROT
) && real_addr
< 0x2000) {
1395 cpu_restore_state(cs
, ra
);
1396 program_interrupt(env
, PGM_PROTECTION
, 4);
1400 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
+= 8) {
1401 stq_phys(cs
->as
, abs_addr
+ i
, 0);
1407 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
1413 /* insert storage key extended */
1414 uint64_t HELPER(iske
)(CPUS390XState
*env
, uint64_t r2
)
1416 static S390SKeysState
*ss
;
1417 static S390SKeysClass
*skeyclass
;
1418 uint64_t addr
= wrap_address(env
, r2
);
1421 if (addr
> ram_size
) {
1425 if (unlikely(!ss
)) {
1426 ss
= s390_get_skeys_device();
1427 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1430 if (skeyclass
->get_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1436 /* set storage key extended */
1437 void HELPER(sske
)(CPUS390XState
*env
, uint64_t r1
, uint64_t r2
)
1439 static S390SKeysState
*ss
;
1440 static S390SKeysClass
*skeyclass
;
1441 uint64_t addr
= wrap_address(env
, r2
);
1444 if (addr
> ram_size
) {
1448 if (unlikely(!ss
)) {
1449 ss
= s390_get_skeys_device();
1450 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1454 skeyclass
->set_skeys(ss
, addr
/ TARGET_PAGE_SIZE
, 1, &key
);
1457 /* reset reference bit extended */
1458 uint32_t HELPER(rrbe
)(CPUS390XState
*env
, uint64_t r2
)
1460 static S390SKeysState
*ss
;
1461 static S390SKeysClass
*skeyclass
;
1464 if (r2
> ram_size
) {
1468 if (unlikely(!ss
)) {
1469 ss
= s390_get_skeys_device();
1470 skeyclass
= S390_SKEYS_GET_CLASS(ss
);
1473 if (skeyclass
->get_skeys(ss
, r2
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1477 re
= key
& (SK_R
| SK_C
);
1480 if (skeyclass
->set_skeys(ss
, r2
/ TARGET_PAGE_SIZE
, 1, &key
)) {
1487 * 0 Reference bit zero; change bit zero
1488 * 1 Reference bit zero; change bit one
1489 * 2 Reference bit one; change bit zero
1490 * 3 Reference bit one; change bit one
1496 uint32_t HELPER(mvcs
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1498 uintptr_t ra
= GETPC();
1501 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1502 __func__
, l
, a1
, a2
);
1510 /* XXX replace w/ memcpy */
1511 for (i
= 0; i
< l
; i
++) {
1512 uint8_t x
= cpu_ldub_primary_ra(env
, a2
+ i
, ra
);
1513 cpu_stb_secondary_ra(env
, a1
+ i
, x
, ra
);
1519 uint32_t HELPER(mvcp
)(CPUS390XState
*env
, uint64_t l
, uint64_t a1
, uint64_t a2
)
1521 uintptr_t ra
= GETPC();
1524 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
1525 __func__
, l
, a1
, a2
);
1533 /* XXX replace w/ memcpy */
1534 for (i
= 0; i
< l
; i
++) {
1535 uint8_t x
= cpu_ldub_secondary_ra(env
, a2
+ i
, ra
);
1536 cpu_stb_primary_ra(env
, a1
+ i
, x
, ra
);
1542 /* invalidate pte */
1543 void HELPER(ipte
)(CPUS390XState
*env
, uint64_t pto
, uint64_t vaddr
,
1546 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1547 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
1548 uint64_t pte_addr
, pte
;
1550 /* Compute the page table entry address */
1551 pte_addr
= (pto
& _SEGMENT_ENTRY_ORIGIN
);
1552 pte_addr
+= (vaddr
& VADDR_PX
) >> 9;
1554 /* Mark the page table entry as invalid */
1555 pte
= ldq_phys(cs
->as
, pte_addr
);
1556 pte
|= _PAGE_INVALID
;
1557 stq_phys(cs
->as
, pte_addr
, pte
);
1559 /* XXX we exploit the fact that Linux passes the exact virtual
1560 address here - it's not obliged to! */
1561 /* XXX: the LC bit should be considered as 0 if the local-TLB-clearing
1562 facility is not installed. */
1564 tlb_flush_page(cs
, page
);
1566 tlb_flush_page_all_cpus_synced(cs
, page
);
1569 /* XXX 31-bit hack */
1571 tlb_flush_page(cs
, page
^ 0x80000000);
1573 tlb_flush_page_all_cpus_synced(cs
, page
^ 0x80000000);
1577 /* flush local tlb */
1578 void HELPER(ptlb
)(CPUS390XState
*env
)
1580 S390CPU
*cpu
= s390_env_get_cpu(env
);
1582 tlb_flush(CPU(cpu
));
1585 /* flush global tlb */
1586 void HELPER(purge
)(CPUS390XState
*env
)
1588 S390CPU
*cpu
= s390_env_get_cpu(env
);
1590 tlb_flush_all_cpus_synced(CPU(cpu
));
1593 /* load using real address */
1594 uint64_t HELPER(lura
)(CPUS390XState
*env
, uint64_t addr
)
1596 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1598 return (uint32_t)ldl_phys(cs
->as
, wrap_address(env
, addr
));
1601 uint64_t HELPER(lurag
)(CPUS390XState
*env
, uint64_t addr
)
1603 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1605 return ldq_phys(cs
->as
, wrap_address(env
, addr
));
1608 /* store using real address */
1609 void HELPER(stura
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
1611 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1613 stl_phys(cs
->as
, wrap_address(env
, addr
), (uint32_t)v1
);
1615 if ((env
->psw
.mask
& PSW_MASK_PER
) &&
1616 (env
->cregs
[9] & PER_CR9_EVENT_STORE
) &&
1617 (env
->cregs
[9] & PER_CR9_EVENT_STORE_REAL
)) {
1618 /* PSW is saved just before calling the helper. */
1619 env
->per_address
= env
->psw
.addr
;
1620 env
->per_perc_atmid
= PER_CODE_EVENT_STORE_REAL
| get_per_atmid(env
);
1624 void HELPER(sturg
)(CPUS390XState
*env
, uint64_t addr
, uint64_t v1
)
1626 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1628 stq_phys(cs
->as
, wrap_address(env
, addr
), v1
);
1630 if ((env
->psw
.mask
& PSW_MASK_PER
) &&
1631 (env
->cregs
[9] & PER_CR9_EVENT_STORE
) &&
1632 (env
->cregs
[9] & PER_CR9_EVENT_STORE_REAL
)) {
1633 /* PSW is saved just before calling the helper. */
1634 env
->per_address
= env
->psw
.addr
;
1635 env
->per_perc_atmid
= PER_CODE_EVENT_STORE_REAL
| get_per_atmid(env
);
1639 /* load real address */
1640 uint64_t HELPER(lra
)(CPUS390XState
*env
, uint64_t addr
)
1642 CPUState
*cs
= CPU(s390_env_get_cpu(env
));
1644 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
1648 /* XXX incomplete - has more corner cases */
1649 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
1650 cpu_restore_state(cs
, GETPC());
1651 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
1654 old_exc
= cs
->exception_index
;
1655 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
, true)) {
1658 if (cs
->exception_index
== EXCP_PGM
) {
1659 ret
= env
->int_pgm_code
| 0x80000000;
1661 ret
|= addr
& ~TARGET_PAGE_MASK
;
1663 cs
->exception_index
= old_exc
;
1670 /* load pair from quadword */
1671 uint64_t HELPER(lpq
)(CPUS390XState
*env
, uint64_t addr
)
1673 uintptr_t ra
= GETPC();
1676 if (parallel_cpus
) {
1677 #ifndef CONFIG_ATOMIC128
1678 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
1680 int mem_idx
= cpu_mmu_index(env
, false);
1681 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
1682 Int128 v
= helper_atomic_ldo_be_mmu(env
, addr
, oi
, ra
);
1683 hi
= int128_gethi(v
);
1684 lo
= int128_getlo(v
);
1687 check_alignment(env
, addr
, 16, ra
);
1689 hi
= cpu_ldq_data_ra(env
, addr
+ 0, ra
);
1690 lo
= cpu_ldq_data_ra(env
, addr
+ 8, ra
);
1697 /* store pair to quadword */
1698 void HELPER(stpq
)(CPUS390XState
*env
, uint64_t addr
,
1699 uint64_t low
, uint64_t high
)
1701 uintptr_t ra
= GETPC();
1703 if (parallel_cpus
) {
1704 #ifndef CONFIG_ATOMIC128
1705 cpu_loop_exit_atomic(ENV_GET_CPU(env
), ra
);
1707 int mem_idx
= cpu_mmu_index(env
, false);
1708 TCGMemOpIdx oi
= make_memop_idx(MO_TEQ
| MO_ALIGN_16
, mem_idx
);
1710 Int128 v
= int128_make128(low
, high
);
1711 helper_atomic_sto_be_mmu(env
, addr
, v
, oi
, ra
);
1714 check_alignment(env
, addr
, 16, ra
);
1716 cpu_stq_data_ra(env
, addr
+ 0, high
, ra
);
1717 cpu_stq_data_ra(env
, addr
+ 8, low
, ra
);
1721 /* Execute instruction. This instruction executes an insn modified with
1722 the contents of r1. It does not change the executed instruction in memory;
1723 it does not change the program counter.
1725 Perform this by recording the modified instruction in env->ex_value.
1726 This will be noticed by cpu_get_tb_cpu_state and thus tb translation.
1728 void HELPER(ex
)(CPUS390XState
*env
, uint32_t ilen
, uint64_t r1
, uint64_t addr
)
1730 uint64_t insn
= cpu_lduw_code(env
, addr
);
1731 uint8_t opc
= insn
>> 8;
1733 /* Or in the contents of R1[56:63]. */
1736 /* Load the rest of the instruction. */
1738 switch (get_ilen(opc
)) {
1742 insn
|= (uint64_t)cpu_lduw_code(env
, addr
+ 2) << 32;
1745 insn
|= (uint64_t)(uint32_t)cpu_ldl_code(env
, addr
+ 2) << 16;
1748 g_assert_not_reached();
1751 /* The very most common cases can be sped up by avoiding a new TB. */
1752 if ((opc
& 0xf0) == 0xd0) {
1753 typedef uint32_t (*dx_helper
)(CPUS390XState
*, uint32_t, uint64_t,
1754 uint64_t, uintptr_t);
1755 static const dx_helper dx
[16] = {
1756 [0x2] = do_helper_mvc
,
1757 [0x4] = do_helper_nc
,
1758 [0x5] = do_helper_clc
,
1759 [0x6] = do_helper_oc
,
1760 [0x7] = do_helper_xc
,
1761 [0xc] = do_helper_tr
,
1762 [0xd] = do_helper_trt
,
1764 dx_helper helper
= dx
[opc
& 0xf];
1767 uint32_t l
= extract64(insn
, 48, 8);
1768 uint32_t b1
= extract64(insn
, 44, 4);
1769 uint32_t d1
= extract64(insn
, 32, 12);
1770 uint32_t b2
= extract64(insn
, 28, 4);
1771 uint32_t d2
= extract64(insn
, 16, 12);
1772 uint64_t a1
= wrap_address(env
, env
->regs
[b1
] + d1
);
1773 uint64_t a2
= wrap_address(env
, env
->regs
[b2
] + d2
);
1775 env
->cc_op
= helper(env
, l
, a1
, a2
, 0);
1776 env
->psw
.addr
+= ilen
;
1779 } else if (opc
== 0x0a) {
1780 env
->int_svc_code
= extract64(insn
, 48, 8);
1781 env
->int_svc_ilen
= ilen
;
1782 helper_exception(env
, EXCP_SVC
);
1783 g_assert_not_reached();
1786 /* Record the insn we want to execute as well as the ilen to use
1787 during the execution of the target insn. This will also ensure
1788 that ex_value is non-zero, which flags that we are in a state
1789 that requires such execution. */
1790 env
->ex_value
= insn
| ilen
;